linux/kernel/trace/ring_buffer.c
   1/*
   2 * Generic ring buffer
   3 *
   4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
   5 */
   6#include <linux/ring_buffer.h>
   7#include <linux/trace_clock.h>
   8#include <linux/spinlock.h>
   9#include <linux/debugfs.h>
  10#include <linux/uaccess.h>
  11#include <linux/hardirq.h>
  12#include <linux/kmemcheck.h>
  13#include <linux/module.h>
  14#include <linux/percpu.h>
  15#include <linux/mutex.h>
  16#include <linux/slab.h>
  17#include <linux/init.h>
  18#include <linux/hash.h>
  19#include <linux/list.h>
  20#include <linux/cpu.h>
  21#include <linux/fs.h>
  22
  23#include <asm/local.h>
  24#include "trace.h"
  25
  26static void update_pages_handler(struct work_struct *work);
  27
  28/*
  29 * The ring buffer header is special. We must manually keep it up to date.
  30 */
  31int ring_buffer_print_entry_header(struct trace_seq *s)
  32{
  33        int ret;
  34
  35        ret = trace_seq_printf(s, "# compressed entry header\n");
  36        ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
  37        ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
  38        ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
  39        ret = trace_seq_printf(s, "\n");
  40        ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
  41                               RINGBUF_TYPE_PADDING);
  42        ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
  43                               RINGBUF_TYPE_TIME_EXTEND);
  44        ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
  45                               RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
  46
  47        return ret;
  48}
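/*
 * Illustrative note (added; not part of the original source): the
 * compressed header printed above corresponds to the event layout
 * declared in <linux/ring_buffer.h>, roughly:
 *
 *	struct ring_buffer_event {
 *		u32	type_len:5, time_delta:27;
 *		u32	array[];
 *	};
 *
 * A type_len of 1..RINGBUF_TYPE_DATA_TYPE_LEN_MAX encodes the payload
 * length directly (in RB_ALIGNMENT units); larger payloads keep their
 * length in array[0] instead.
 */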
  49
  50/*
  51 * The ring buffer is made up of a list of pages. A separate list of pages is
  52 * allocated for each CPU. A writer may only write to a buffer that is
  53 * associated with the CPU it is currently executing on.  A reader may read
  54 * from any per cpu buffer.
  55 *
  56 * The reader is special. For each per cpu buffer, the reader has its own
  57 * reader page. When a reader has read the entire reader page, this reader
  58 * page is swapped with another page in the ring buffer.
  59 *
  60 * Now, as long as the writer is off the reader page, the reader can do
  61 * whatever it wants with that page. The writer will never write to that
  62 * page again (as long as it is out of the ring buffer).
  63 *
  64 * Here's some silly ASCII art.
  65 *
  66 *   +------+
  67 *   |reader|          RING BUFFER
  68 *   |page  |
  69 *   +------+        +---+   +---+   +---+
  70 *                   |   |-->|   |-->|   |
  71 *                   +---+   +---+   +---+
  72 *                     ^               |
  73 *                     |               |
  74 *                     +---------------+
  75 *
  76 *
  77 *   +------+
  78 *   |reader|          RING BUFFER
  79 *   |page  |------------------v
  80 *   +------+        +---+   +---+   +---+
  81 *                   |   |-->|   |-->|   |
  82 *                   +---+   +---+   +---+
  83 *                     ^               |
  84 *                     |               |
  85 *                     +---------------+
  86 *
  87 *
  88 *   +------+
  89 *   |reader|          RING BUFFER
  90 *   |page  |------------------v
  91 *   +------+        +---+   +---+   +---+
  92 *      ^            |   |-->|   |-->|   |
  93 *      |            +---+   +---+   +---+
  94 *      |                              |
  95 *      |                              |
  96 *      +------------------------------+
  97 *
  98 *
  99 *   +------+
 100 *   |buffer|          RING BUFFER
 101 *   |page  |------------------v
 102 *   +------+        +---+   +---+   +---+
 103 *      ^            |   |   |   |-->|   |
 104 *      |   New      +---+   +---+   +---+
 105 *      |  Reader------^               |
 106 *      |   page                       |
 107 *      +------------------------------+
 108 *
 109 *
 110 * After we make this swap, the reader can hand this page off to the splice
 111 * code and be done with it. It can even allocate a new page if it needs to
 112 * and swap that into the ring buffer.
 113 *
 114 * We will be using cmpxchg soon to make all this lockless.
 115 *
 116 */
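/*
 * Illustrative example (added; not part of the original source): the
 * page swap described above is what makes a consuming read cheap. A
 * reader built on this API might look like the sketch below; locking
 * and error handling are omitted.
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
 *		void *data = ring_buffer_event_data(event);
 *		unsigned len = ring_buffer_event_length(event);
 *
 *		(process len bytes at data; ts is the event time stamp)
 *	}
 */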
 117
 118/*
 119 * A fast way to enable or disable all ring buffers is to
 120 * call tracing_on or tracing_off. Turning off the ring buffers
 121 * prevents all ring buffers from being recorded to.
 122 * Turning this switch on makes it OK to write to the
 123 * ring buffer, if the ring buffer is enabled itself.
 124 *
 125 * There are three layers that must be on in order to write
 126 * to the ring buffer.
 127 *
 128 * 1) This global flag must be set.
 129 * 2) The ring buffer must be enabled for recording.
 130 * 3) The per cpu buffer must be enabled for recording.
 131 *
 132 * In case of an anomaly, this global flag has a bit set that
 133 * will permanently disable all ring buffers.
 134 */
 135
 136/*
 137 * Global flag to disable all recording to ring buffers
 138 *  This has two bits: ON, DISABLED
 139 *
 140 *  ON   DISABLED
 141 * ---- ----------
 142 *   0      0        : ring buffers are off
 143 *   1      0        : ring buffers are on
 144 *   X      1        : ring buffers are permanently disabled
 145 */
 146
 147enum {
 148        RB_BUFFERS_ON_BIT       = 0,
 149        RB_BUFFERS_DISABLED_BIT = 1,
 150};
 151
 152enum {
 153        RB_BUFFERS_ON           = 1 << RB_BUFFERS_ON_BIT,
 154        RB_BUFFERS_DISABLED     = 1 << RB_BUFFERS_DISABLED_BIT,
 155};
 156
 157static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
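/*
 * Illustrative sketch (added; not part of the original source): the
 * write paths in this file consult the flag word roughly like this
 * before reserving space, which is how the table above maps to code:
 *
 *	if (ring_buffer_flags != RB_BUFFERS_ON)
 *		return NULL;	(recording is off or permanently disabled)
 */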
 158
 159/* Used for individual buffers (after the counter) */
 160#define RB_BUFFER_OFF           (1 << 20)
 161
 162#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
 163
 164/**
 165 * tracing_off_permanent - permanently disable ring buffers
 166 *
 167 * This function, once called, will disable all ring buffers
 168 * permanently.
 169 */
 170void tracing_off_permanent(void)
 171{
 172        set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
 173}
 174
 175#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 176#define RB_ALIGNMENT            4U
 177#define RB_MAX_SMALL_DATA       (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 178#define RB_EVNT_MIN_SIZE        8U      /* two 32bit words */
 179
 180#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
 181# define RB_FORCE_8BYTE_ALIGNMENT       0
 182# define RB_ARCH_ALIGNMENT              RB_ALIGNMENT
 183#else
 184# define RB_FORCE_8BYTE_ALIGNMENT       1
 185# define RB_ARCH_ALIGNMENT              8U
 186#endif
 187
 188/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 189#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
 190
 191enum {
 192        RB_LEN_TIME_EXTEND = 8,
 193        RB_LEN_TIME_STAMP = 16,
 194};
 195
 196#define skip_time_extend(event) \
 197        ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
 198
 199static inline int rb_null_event(struct ring_buffer_event *event)
 200{
 201        return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
 202}
 203
 204static void rb_event_set_padding(struct ring_buffer_event *event)
 205{
 206        /* padding has a NULL time_delta */
 207        event->type_len = RINGBUF_TYPE_PADDING;
 208        event->time_delta = 0;
 209}
 210
 211static unsigned
 212rb_event_data_length(struct ring_buffer_event *event)
 213{
 214        unsigned length;
 215
 216        if (event->type_len)
 217                length = event->type_len * RB_ALIGNMENT;
 218        else
 219                length = event->array[0];
 220        return length + RB_EVNT_HDR_SIZE;
 221}
 222
 223/*
 224 * Return the length of the given event. Will return
 225 * the length of the time extend if the event is a
 226 * time extend.
 227 */
 228static inline unsigned
 229rb_event_length(struct ring_buffer_event *event)
 230{
 231        switch (event->type_len) {
 232        case RINGBUF_TYPE_PADDING:
 233                if (rb_null_event(event))
 234                        /* undefined */
 235                        return -1;
 236                return  event->array[0] + RB_EVNT_HDR_SIZE;
 237
 238        case RINGBUF_TYPE_TIME_EXTEND:
 239                return RB_LEN_TIME_EXTEND;
 240
 241        case RINGBUF_TYPE_TIME_STAMP:
 242                return RB_LEN_TIME_STAMP;
 243
 244        case RINGBUF_TYPE_DATA:
 245                return rb_event_data_length(event);
 246        default:
 247                BUG();
 248        }
 249        /* not hit */
 250        return 0;
 251}
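/*
 * Worked example (added; not part of the original source): a 12-byte
 * payload fits in the type_len field, so type_len = 12 / RB_ALIGNMENT
 * = 3 and rb_event_data_length() returns 3 * RB_ALIGNMENT +
 * RB_EVNT_HDR_SIZE = 16 bytes (with a 4-byte event header). A payload
 * larger than RB_MAX_SMALL_DATA sets type_len to 0 and stores the
 * length in array[0], at the cost of four extra header bytes.
 */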
 252
 253/*
 254 * Return total length of time extend and data,
 255 *   or just the event length for all other events.
 256 */
 257static inline unsigned
 258rb_event_ts_length(struct ring_buffer_event *event)
 259{
 260        unsigned len = 0;
 261
 262        if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
 263                /* time extends include the data event after it */
 264                len = RB_LEN_TIME_EXTEND;
 265                event = skip_time_extend(event);
 266        }
 267        return len + rb_event_length(event);
 268}
 269
 270/**
 271 * ring_buffer_event_length - return the length of the event
 272 * @event: the event to get the length of
 273 *
 274 * Returns the size of the data load of a data event.
 275 * If the event is something other than a data event, it
 276 * returns the size of the event itself. With the exception
 277 * of a TIME EXTEND, where it still returns the size of the
 278 * data load of the data event after it.
 279 */
 280unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 281{
 282        unsigned length;
 283
 284        if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
 285                event = skip_time_extend(event);
 286
 287        length = rb_event_length(event);
 288        if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 289                return length;
 290        length -= RB_EVNT_HDR_SIZE;
 291        if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
 292                length -= sizeof(event->array[0]);
 293        return length;
 294}
 295EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 296
 297/* inline for ring buffer fast paths */
 298static void *
 299rb_event_data(struct ring_buffer_event *event)
 300{
 301        if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
 302                event = skip_time_extend(event);
 303        BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
 304        /* If length is in len field, then array[0] has the data */
 305        if (event->type_len)
 306                return (void *)&event->array[0];
 307        /* Otherwise length is in array[0] and array[1] has the data */
 308        return (void *)&event->array[1];
 309}
 310
 311/**
 312 * ring_buffer_event_data - return the data of the event
 313 * @event: the event to get the data from
 314 */
 315void *ring_buffer_event_data(struct ring_buffer_event *event)
 316{
 317        return rb_event_data(event);
 318}
 319EXPORT_SYMBOL_GPL(ring_buffer_event_data);
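/*
 * Illustrative example (added; not part of the original source): a
 * caller that wrote a fixed-size record can read it back by casting
 * the payload pointer. "struct my_entry" and do_something_with() are
 * hypothetical.
 *
 *	struct my_entry *entry = ring_buffer_event_data(event);
 *	unsigned avail = ring_buffer_event_length(event);
 *
 *	if (avail >= sizeof(*entry))
 *		do_something_with(entry);
 *
 * Both helpers transparently skip a leading time-extend event, so the
 * caller always sees the data event itself.
 */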
 320
 321#define for_each_buffer_cpu(buffer, cpu)                \
 322        for_each_cpu(cpu, buffer->cpumask)
 323
 324#define TS_SHIFT        27
 325#define TS_MASK         ((1ULL << TS_SHIFT) - 1)
 326#define TS_DELTA_TEST   (~TS_MASK)
 327
 328/* Flag when events were overwritten */
 329#define RB_MISSED_EVENTS        (1 << 31)
 330/* Missed count stored at end */
 331#define RB_MISSED_STORED        (1 << 30)
 332
 333struct buffer_data_page {
 334        u64              time_stamp;    /* page time stamp */
 335        local_t          commit;        /* write committed index */
 336        unsigned char    data[];        /* data of buffer page */
 337};
 338
 339/*
 340 * Note, the buffer_page list must be first. The buffer pages
 341 * are allocated in cache lines, which means that each buffer
 342 * page will be at the beginning of a cache line, and thus
 343 * the least significant bits will be zero. We use this to
 344 * add flags in the list struct pointers, to make the ring buffer
 345 * lockless.
 346 */
 347struct buffer_page {
 348        struct list_head list;          /* list of buffer pages */
 349        local_t          write;         /* index for next write */
 350        unsigned         read;          /* index for next read */
 351        local_t          entries;       /* entries on this page */
 352        unsigned long    real_end;      /* real end of data */
 353        struct buffer_data_page *page;  /* Actual data page */
 354};
 355
 356/*
 357 * The buffer page counters, write and entries, must be reset
 358 * atomically when crossing page boundaries. To synchronize this
 359 * update, two counters are packed into a single number. One is
 360 * the actual counter for the write position or count on the page.
 361 *
 362 * The other is a count of updaters. Before an update happens,
 363 * the updater partition of the number is incremented. This
 364 * allows the updater to update the counter atomically.
 365 *
 366 * The counter uses the low 20 bits; the updater count uses the upper 12.
 367 */
 368#define RB_WRITE_MASK           0xfffff
 369#define RB_WRITE_INTCNT         (1 << 20)
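/*
 * Illustrative sketch (added; not part of the original source):
 * splitting a raw "write" value into the two parts described above.
 *
 *	unsigned long raw   = local_read(&bpage->write);
 *	unsigned long index = raw & RB_WRITE_MASK;	(write position)
 *	unsigned long nest  = raw & ~RB_WRITE_MASK;	(updater count, in
 *							 units of
 *							 RB_WRITE_INTCNT)
 */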
 370
 371static void rb_init_page(struct buffer_data_page *bpage)
 372{
 373        local_set(&bpage->commit, 0);
 374}
 375
 376/**
 377 * ring_buffer_page_len - the size of data on the page.
 378 * @page: The page to read
 379 *
 380 * Returns the amount of data on the page, including buffer page header.
 381 */
 382size_t ring_buffer_page_len(void *page)
 383{
 384        return local_read(&((struct buffer_data_page *)page)->commit)
 385                + BUF_PAGE_HDR_SIZE;
 386}
 387
 388/*
 389 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 390 * this issue out.
 391 */
 392static void free_buffer_page(struct buffer_page *bpage)
 393{
 394        free_page((unsigned long)bpage->page);
 395        kfree(bpage);
 396}
 397
 398/*
 399 * We need to fit the time_stamp delta into 27 bits.
 400 */
 401static inline int test_time_stamp(u64 delta)
 402{
 403        if (delta & TS_DELTA_TEST)
 404                return 1;
 405        return 0;
 406}
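/*
 * Worked example (added; not part of the original source): with a
 * 27-bit delta field and a nanosecond clock, a gap of up to 2^27 - 1 ns
 * (about 134 ms) between events fits directly in the event header. A
 * larger gap makes test_time_stamp() return 1, forcing the writer to
 * emit a separate TIME_EXTEND event carrying the full delta.
 */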
 407
 408#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
 409
 410/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
 411#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
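/*
 * Worked example (added; not part of the original source), assuming 4K
 * pages and an 8-byte local_t: BUF_PAGE_HDR_SIZE is 16 (time_stamp +
 * commit), so BUF_PAGE_SIZE is 4096 - 16 = 4080 bytes of event space
 * per page, and BUF_MAX_DATA_SIZE is 4080 - 8 = 4072 bytes for a single
 * event's payload.
 */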
 412
 413int ring_buffer_print_page_header(struct trace_seq *s)
 414{
 415        struct buffer_data_page field;
 416        int ret;
 417
 418        ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
 419                               "offset:0;\tsize:%u;\tsigned:%u;\n",
 420                               (unsigned int)sizeof(field.time_stamp),
 421                               (unsigned int)is_signed_type(u64));
 422
 423        ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
 424                               "offset:%u;\tsize:%u;\tsigned:%u;\n",
 425                               (unsigned int)offsetof(typeof(field), commit),
 426                               (unsigned int)sizeof(field.commit),
 427                               (unsigned int)is_signed_type(long));
 428
 429        ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
 430                               "offset:%u;\tsize:%u;\tsigned:%u;\n",
 431                               (unsigned int)offsetof(typeof(field), commit),
 432                               1,
 433                               (unsigned int)is_signed_type(long));
 434
 435        ret = trace_seq_printf(s, "\tfield: char data;\t"
 436                               "offset:%u;\tsize:%u;\tsigned:%u;\n",
 437                               (unsigned int)offsetof(typeof(field), data),
 438                               (unsigned int)BUF_PAGE_SIZE,
 439                               (unsigned int)is_signed_type(char));
 440
 441        return ret;
 442}
 443
 444/*
 445 * If head_page == tail_page && head == tail, then the buffer is empty.
 446 */
 447struct ring_buffer_per_cpu {
 448        int                             cpu;
 449        atomic_t                        record_disabled;
 450        struct ring_buffer              *buffer;
 451        raw_spinlock_t                  reader_lock;    /* serialize readers */
 452        arch_spinlock_t                 lock;
 453        struct lock_class_key           lock_key;
 454        unsigned int                    nr_pages;
 455        struct list_head                *pages;
 456        struct buffer_page              *head_page;     /* read from head */
 457        struct buffer_page              *tail_page;     /* write to tail */
 458        struct buffer_page              *commit_page;   /* committed pages */
 459        struct buffer_page              *reader_page;
 460        unsigned long                   lost_events;
 461        unsigned long                   last_overrun;
 462        local_t                         entries_bytes;
 463        local_t                         entries;
 464        local_t                         overrun;
 465        local_t                         commit_overrun;
 466        local_t                         dropped_events;
 467        local_t                         committing;
 468        local_t                         commits;
 469        unsigned long                   read;
 470        unsigned long                   read_bytes;
 471        u64                             write_stamp;
 472        u64                             read_stamp;
 473        /* ring buffer pages to update, > 0 to add, < 0 to remove */
 474        int                             nr_pages_to_update;
 475        struct list_head                new_pages; /* new pages to add */
 476        struct work_struct              update_pages_work;
 477        struct completion               update_done;
 478};
 479
 480struct ring_buffer {
 481        unsigned                        flags;
 482        int                             cpus;
 483        atomic_t                        record_disabled;
 484        atomic_t                        resize_disabled;
 485        cpumask_var_t                   cpumask;
 486
 487        struct lock_class_key           *reader_lock_key;
 488
 489        struct mutex                    mutex;
 490
 491        struct ring_buffer_per_cpu      **buffers;
 492
 493#ifdef CONFIG_HOTPLUG_CPU
 494        struct notifier_block           cpu_notify;
 495#endif
 496        u64                             (*clock)(void);
 497};
 498
 499struct ring_buffer_iter {
 500        struct ring_buffer_per_cpu      *cpu_buffer;
 501        unsigned long                   head;
 502        struct buffer_page              *head_page;
 503        struct buffer_page              *cache_reader_page;
 504        unsigned long                   cache_read;
 505        u64                             read_stamp;
 506};
 507
 508/* buffer may be either ring_buffer or ring_buffer_per_cpu */
 509#define RB_WARN_ON(b, cond)                                             \
 510        ({                                                              \
 511                int _____ret = unlikely(cond);                          \
 512                if (_____ret) {                                         \
 513                        if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
 514                                struct ring_buffer_per_cpu *__b =       \
 515                                        (void *)b;                      \
 516                                atomic_inc(&__b->buffer->record_disabled); \
 517                        } else                                          \
 518                                atomic_inc(&b->record_disabled);        \
 519                        WARN_ON(1);                                     \
 520                }                                                       \
 521                _____ret;                                               \
 522        })
 523
 524/* Up this if you want to test the TIME_EXTENTS and normalization */
 525#define DEBUG_SHIFT 0
 526
 527static inline u64 rb_time_stamp(struct ring_buffer *buffer)
 528{
 529        /* shift to debug/test normalization and TIME_EXTENTS */
 530        return buffer->clock() << DEBUG_SHIFT;
 531}
 532
 533u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
 534{
 535        u64 time;
 536
 537        preempt_disable_notrace();
 538        time = rb_time_stamp(buffer);
 539        preempt_enable_no_resched_notrace();
 540
 541        return time;
 542}
 543EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
 544
 545void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
 546                                      int cpu, u64 *ts)
 547{
 548        /* Just stupid testing the normalize function and deltas */
 549        *ts >>= DEBUG_SHIFT;
 550}
 551EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
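/*
 * Illustrative example (added; not part of the original source): the
 * two functions above are meant to be used together when reporting a
 * time stamp:
 *
 *	u64 ts = ring_buffer_time_stamp(buffer, cpu);
 *
 *	ring_buffer_normalize_time_stamp(buffer, cpu, &ts);
 *	(ts is now in the same scale as time stamps read from events)
 */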
 552
 553/*
 554 * Making the ring buffer lockless makes things tricky.
 555 * Writes only happen on the CPU that they are on, so they
 556 * only need to worry about interrupts. Reads, however, can
 557 * happen on any CPU.
 558 *
 559 * The reader page is always off the ring buffer, but when the
 560 * reader finishes with a page, it needs to swap its page with
 561 * a new one from the buffer. The reader needs to take from
 562 * the head (writes go to the tail). But if a writer is in overwrite
 563 * mode and wraps, it must push the head page forward.
 564 *
 565 * Here lies the problem.
 566 *
 567 * The reader must be careful to replace only the head page, and
 568 * not another one. As described at the top of the file in the
 569 * ASCII art, the reader sets its old page to point to the next
 570 * page after head. It then sets the page after head to point to
 571 * the old reader page. But if the writer moves the head page
 572 * during this operation, the reader could end up with the tail.
 573 *
 574 * We use cmpxchg to help prevent this race. We also do something
 575 * special with the page before head. We set the LSB to 1.
 576 *
 577 * When the writer must push the page forward, it will clear the
 578 * bit that points to the head page, move the head, and then set
 579 * the bit that points to the new head page.
 580 *
 581 * We also don't want an interrupt coming in and moving the head
 582 * page out from under another writer, so we use the second LSB
 583 * to catch that too. Thus:
 584 *
 585 * head->list->prev->next        bit 1          bit 0
 586 *                              -------        -------
 587 * Normal page                     0              0
 588 * Points to head page             0              1
 589 * New head page                   1              0
 590 *
 591 * Note we can not trust the prev pointer of the head page, because:
 592 *
 593 * +----+       +-----+        +-----+
 594 * |    |------>|  T  |---X--->|  N  |
 595 * |    |<------|     |        |     |
 596 * +----+       +-----+        +-----+
 597 *   ^                           ^ |
 598 *   |          +-----+          | |
 599 *   +----------|  R  |----------+ |
 600 *              |     |<-----------+
 601 *              +-----+
 602 *
 603 * Key:  ---X-->  HEAD flag set in pointer
 604 *         T      Tail page
 605 *         R      Reader page
 606 *         N      Next page
 607 *
 608 * (see __rb_reserve_next() to see where this happens)
 609 *
 610 *  What the above shows is that the reader just swapped out
 611 *  the reader page with a page in the buffer, but before it
 612 *  could make the new head page point back to the page it added,
 613 *  it was preempted by a writer. The writer moved forward onto
 614 *  the new page added by the reader and is about to move forward
 615 *  again.
 616 *
 617 *  As you can see, it is legitimate for the previous pointer of
 618 *  the head (or any page) not to point back to itself, but only
 619 *  temporarily.
 620 */
 621
 622#define RB_PAGE_NORMAL          0UL
 623#define RB_PAGE_HEAD            1UL
 624#define RB_PAGE_UPDATE          2UL
 625
 626
 627#define RB_FLAG_MASK            3UL
 628
 629/* PAGE_MOVED is not part of the mask */
 630#define RB_PAGE_MOVED           4UL
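/*
 * Illustrative sketch (added; not part of the original source): how the
 * flag bits above ride in the low bits of a ->next pointer. Because
 * buffer pages are cache-line aligned, those bits are otherwise zero.
 *
 *	unsigned long val = (unsigned long)head->list.prev->next;
 *
 *	struct list_head *next = (struct list_head *)(val & ~RB_FLAG_MASK);
 *	unsigned long flag     = val & RB_FLAG_MASK;	(RB_PAGE_NORMAL,
 *							 RB_PAGE_HEAD or
 *							 RB_PAGE_UPDATE)
 */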
 631
 632/*
 633 * rb_list_head - remove any flag bits from a list pointer
 634 */
 635static struct list_head *rb_list_head(struct list_head *list)
 636{
 637        unsigned long val = (unsigned long)list;
 638
 639        return (struct list_head *)(val & ~RB_FLAG_MASK);
 640}
 641
 642/*
 643 * rb_is_head_page - test if the given page is the head page
 644 *
 645 * Because the reader may move the head_page pointer, we can
 646 * not trust what the head page is (it may be pointing to
 647 * the reader page). But if the next page is the head page,
 648 * its flags will be non-zero.
 649 */
 650static inline int
 651rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 652                struct buffer_page *page, struct list_head *list)
 653{
 654        unsigned long val;
 655
 656        val = (unsigned long)list->next;
 657
 658        if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
 659                return RB_PAGE_MOVED;
 660
 661        return val & RB_FLAG_MASK;
 662}
 663
 664/*
 665 * rb_is_reader_page
 666 *
 667 * The unique thing about the reader page is that, if the
 668 * writer is ever on it, the previous pointer never points
 669 * back to the reader page.
 670 */
 671static int rb_is_reader_page(struct buffer_page *page)
 672{
 673        struct list_head *list = page->list.prev;
 674
 675        return rb_list_head(list->next) != &page->list;
 676}
 677
 678/*
 679 * rb_set_list_to_head - set a list_head to be pointing to head.
 680 */
 681static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
 682                                struct list_head *list)
 683{
 684        unsigned long *ptr;
 685
 686        ptr = (unsigned long *)&list->next;
 687        *ptr |= RB_PAGE_HEAD;
 688        *ptr &= ~RB_PAGE_UPDATE;
 689}
 690
 691/*
 692 * rb_head_page_activate - sets up head page
 693 */
 694static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
 695{
 696        struct buffer_page *head;
 697
 698        head = cpu_buffer->head_page;
 699        if (!head)
 700                return;
 701
 702        /*
 703         * Set the previous list pointer to have the HEAD flag.
 704         */
 705        rb_set_list_to_head(cpu_buffer, head->list.prev);
 706}
 707
 708static void rb_list_head_clear(struct list_head *list)
 709{
 710        unsigned long *ptr = (unsigned long *)&list->next;
 711
 712        *ptr &= ~RB_FLAG_MASK;
 713}
 714
 715/*
 716 * rb_head_page_deactivate - clears head page ptr (for free list)
 717 */
 718static void
 719rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
 720{
 721        struct list_head *hd;
 722
 723        /* Go through the whole list and clear any pointers found. */
 724        rb_list_head_clear(cpu_buffer->pages);
 725
 726        list_for_each(hd, cpu_buffer->pages)
 727                rb_list_head_clear(hd);
 728}
 729
 730static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
 731                            struct buffer_page *head,
 732                            struct buffer_page *prev,
 733                            int old_flag, int new_flag)
 734{
 735        struct list_head *list;
 736        unsigned long val = (unsigned long)&head->list;
 737        unsigned long ret;
 738
 739        list = &prev->list;
 740
 741        val &= ~RB_FLAG_MASK;
 742
 743        ret = cmpxchg((unsigned long *)&list->next,
 744                      val | old_flag, val | new_flag);
 745
 746        /* check if the reader took the page */
 747        if ((ret & ~RB_FLAG_MASK) != val)
 748                return RB_PAGE_MOVED;
 749
 750        return ret & RB_FLAG_MASK;
 751}
 752
 753static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
 754                                   struct buffer_page *head,
 755                                   struct buffer_page *prev,
 756                                   int old_flag)
 757{
 758        return rb_head_page_set(cpu_buffer, head, prev,
 759                                old_flag, RB_PAGE_UPDATE);
 760}
 761
 762static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
 763                                 struct buffer_page *head,
 764                                 struct buffer_page *prev,
 765                                 int old_flag)
 766{
 767        return rb_head_page_set(cpu_buffer, head, prev,
 768                                old_flag, RB_PAGE_HEAD);
 769}
 770
 771static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
 772                                   struct buffer_page *head,
 773                                   struct buffer_page *prev,
 774                                   int old_flag)
 775{
 776        return rb_head_page_set(cpu_buffer, head, prev,
 777                                old_flag, RB_PAGE_NORMAL);
 778}
 779
 780static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
 781                               struct buffer_page **bpage)
 782{
 783        struct list_head *p = rb_list_head((*bpage)->list.next);
 784
 785        *bpage = list_entry(p, struct buffer_page, list);
 786}
 787
 788static struct buffer_page *
 789rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
 790{
 791        struct buffer_page *head;
 792        struct buffer_page *page;
 793        struct list_head *list;
 794        int i;
 795
 796        if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
 797                return NULL;
 798
 799        /* sanity check */
 800        list = cpu_buffer->pages;
 801        if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
 802                return NULL;
 803
 804        page = head = cpu_buffer->head_page;
 805        /*
 806         * It is possible that the writer moves the head page behind
 807         * where we started, and we miss it in one loop.
 808         * A second loop should grab the head page, but we'll do
 809         * three loops just because I'm paranoid.
 810         */
 811        for (i = 0; i < 3; i++) {
 812                do {
 813                        if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
 814                                cpu_buffer->head_page = page;
 815                                return page;
 816                        }
 817                        rb_inc_page(cpu_buffer, &page);
 818                } while (page != head);
 819        }
 820
 821        RB_WARN_ON(cpu_buffer, 1);
 822
 823        return NULL;
 824}
 825
 826static int rb_head_page_replace(struct buffer_page *old,
 827                                struct buffer_page *new)
 828{
 829        unsigned long *ptr = (unsigned long *)&old->list.prev->next;
 830        unsigned long val;
 831        unsigned long ret;
 832
 833        val = *ptr & ~RB_FLAG_MASK;
 834        val |= RB_PAGE_HEAD;
 835
 836        ret = cmpxchg(ptr, val, (unsigned long)&new->list);
 837
 838        return ret == val;
 839}
 840
 841/*
 842 * rb_tail_page_update - move the tail page forward
 843 *
 844 * Returns 1 if we moved the tail page, 0 if someone else did.
 845 */
 846static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
 847                               struct buffer_page *tail_page,
 848                               struct buffer_page *next_page)
 849{
 850        struct buffer_page *old_tail;
 851        unsigned long old_entries;
 852        unsigned long old_write;
 853        int ret = 0;
 854
 855        /*
 856         * The tail page now needs to be moved forward.
 857         *
 858         * We need to reset the tail page, but without messing
 859         * with possible erasing of data brought in by interrupts
 860         * that have moved the tail page and are currently on it.
 861         *
 862         * We add a counter to the write field to denote this.
 863         */
 864        old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
 865        old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
 866
 867        /*
 868         * Just make sure we have seen our old_write and synchronize
 869         * with any interrupts that come in.
 870         */
 871        barrier();
 872
 873        /*
 874         * If the tail page is still the same as what we think
 875         * it is, then it is up to us to update the tail
 876         * pointer.
 877         */
 878        if (tail_page == cpu_buffer->tail_page) {
 879                /* Zero the write counter */
 880                unsigned long val = old_write & ~RB_WRITE_MASK;
 881                unsigned long eval = old_entries & ~RB_WRITE_MASK;
 882
 883                /*
 884                 * This will only succeed if an interrupt did
 885                 * not come in and change it, in which case we
 886                 * do not want to modify it.
 887                 *
 888                 * We add (void) to let the compiler know that we do not care
 889                 * about the return value of these functions. We use the
 890                 * cmpxchg to only update if an interrupt did not already
 891                 * do it for us. If the cmpxchg fails, we don't care.
 892                 */
 893                (void)local_cmpxchg(&next_page->write, old_write, val);
 894                (void)local_cmpxchg(&next_page->entries, old_entries, eval);
 895
 896                /*
 897                 * No need to worry about races with clearing out the commit.
 898                 * It can only increment when a commit takes place. But that
 899                 * only happens in the outermost nested commit.
 900                 */
 901                local_set(&next_page->page->commit, 0);
 902
 903                old_tail = cmpxchg(&cpu_buffer->tail_page,
 904                                   tail_page, next_page);
 905
 906                if (old_tail == tail_page)
 907                        ret = 1;
 908        }
 909
 910        return ret;
 911}
 912
 913static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
 914                          struct buffer_page *bpage)
 915{
 916        unsigned long val = (unsigned long)bpage;
 917
 918        if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
 919                return 1;
 920
 921        return 0;
 922}
 923
 924/**
 925 * rb_check_list - make sure a pointer to a list has the last bits zero
 926 */
 927static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
 928                         struct list_head *list)
 929{
 930        if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
 931                return 1;
 932        if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
 933                return 1;
 934        return 0;
 935}
 936
 937/**
 938 * rb_check_pages - integrity check of buffer pages
 939 * @cpu_buffer: CPU buffer with pages to test
 940 *
 941 * As a safety measure we check to make sure the data pages have not
 942 * been corrupted.
 943 */
 944static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
 945{
 946        struct list_head *head = cpu_buffer->pages;
 947        struct buffer_page *bpage, *tmp;
 948
 949        /* Reset the head page if it exists */
 950        if (cpu_buffer->head_page)
 951                rb_set_head_page(cpu_buffer);
 952
 953        rb_head_page_deactivate(cpu_buffer);
 954
 955        if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
 956                return -1;
 957        if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
 958                return -1;
 959
 960        if (rb_check_list(cpu_buffer, head))
 961                return -1;
 962
 963        list_for_each_entry_safe(bpage, tmp, head, list) {
 964                if (RB_WARN_ON(cpu_buffer,
 965                               bpage->list.next->prev != &bpage->list))
 966                        return -1;
 967                if (RB_WARN_ON(cpu_buffer,
 968                               bpage->list.prev->next != &bpage->list))
 969                        return -1;
 970                if (rb_check_list(cpu_buffer, &bpage->list))
 971                        return -1;
 972        }
 973
 974        rb_head_page_activate(cpu_buffer);
 975
 976        return 0;
 977}
 978
 979static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
 980{
 981        int i;
 982        struct buffer_page *bpage, *tmp;
 983
 984        for (i = 0; i < nr_pages; i++) {
 985                struct page *page;
 986                /*
 987                 * The __GFP_NORETRY flag makes sure that the allocation fails
 988                 * gracefully without invoking the OOM killer, so the system
 989                 * is not destabilized.
 990                 */
 991                bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 992                                    GFP_KERNEL | __GFP_NORETRY,
 993                                    cpu_to_node(cpu));
 994                if (!bpage)
 995                        goto free_pages;
 996
 997                list_add(&bpage->list, pages);
 998
 999                page = alloc_pages_node(cpu_to_node(cpu),
1000                                        GFP_KERNEL | __GFP_NORETRY, 0);
1001                if (!page)
1002                        goto free_pages;
1003                bpage->page = page_address(page);
1004                rb_init_page(bpage->page);
1005        }
1006
1007        return 0;
1008
1009free_pages:
1010        list_for_each_entry_safe(bpage, tmp, pages, list) {
1011                list_del_init(&bpage->list);
1012                free_buffer_page(bpage);
1013        }
1014
1015        return -ENOMEM;
1016}
1017
1018static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1019                             unsigned nr_pages)
1020{
1021        LIST_HEAD(pages);
1022
1023        WARN_ON(!nr_pages);
1024
1025        if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
1026                return -ENOMEM;
1027
1028        /*
1029         * The ring buffer page list is a circular list that does not
1030         * start and end with a list head. All page list items point to
1031         * other pages.
1032         */
1033        cpu_buffer->pages = pages.next;
1034        list_del(&pages);
1035
1036        cpu_buffer->nr_pages = nr_pages;
1037
1038        rb_check_pages(cpu_buffer);
1039
1040        return 0;
1041}
1042
1043static struct ring_buffer_per_cpu *
1044rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
1045{
1046        struct ring_buffer_per_cpu *cpu_buffer;
1047        struct buffer_page *bpage;
1048        struct page *page;
1049        int ret;
1050
1051        cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1052                                  GFP_KERNEL, cpu_to_node(cpu));
1053        if (!cpu_buffer)
1054                return NULL;
1055
1056        cpu_buffer->cpu = cpu;
1057        cpu_buffer->buffer = buffer;
1058        raw_spin_lock_init(&cpu_buffer->reader_lock);
1059        lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1060        cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1061        INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1062        init_completion(&cpu_buffer->update_done);
1063
1064        bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1065                            GFP_KERNEL, cpu_to_node(cpu));
1066        if (!bpage)
1067                goto fail_free_buffer;
1068
1069        rb_check_bpage(cpu_buffer, bpage);
1070
1071        cpu_buffer->reader_page = bpage;
1072        page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1073        if (!page)
1074                goto fail_free_reader;
1075        bpage->page = page_address(page);
1076        rb_init_page(bpage->page);
1077
1078        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1079        INIT_LIST_HEAD(&cpu_buffer->new_pages);
1080
1081        ret = rb_allocate_pages(cpu_buffer, nr_pages);
1082        if (ret < 0)
1083                goto fail_free_reader;
1084
1085        cpu_buffer->head_page
1086                = list_entry(cpu_buffer->pages, struct buffer_page, list);
1087        cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1088
1089        rb_head_page_activate(cpu_buffer);
1090
1091        return cpu_buffer;
1092
1093 fail_free_reader:
1094        free_buffer_page(cpu_buffer->reader_page);
1095
1096 fail_free_buffer:
1097        kfree(cpu_buffer);
1098        return NULL;
1099}
1100
1101static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1102{
1103        struct list_head *head = cpu_buffer->pages;
1104        struct buffer_page *bpage, *tmp;
1105
1106        free_buffer_page(cpu_buffer->reader_page);
1107
1108        rb_head_page_deactivate(cpu_buffer);
1109
1110        if (head) {
1111                list_for_each_entry_safe(bpage, tmp, head, list) {
1112                        list_del_init(&bpage->list);
1113                        free_buffer_page(bpage);
1114                }
1115                bpage = list_entry(head, struct buffer_page, list);
1116                free_buffer_page(bpage);
1117        }
1118
1119        kfree(cpu_buffer);
1120}
1121
1122#ifdef CONFIG_HOTPLUG_CPU
1123static int rb_cpu_notify(struct notifier_block *self,
1124                         unsigned long action, void *hcpu);
1125#endif
1126
1127/**
1128 * ring_buffer_alloc - allocate a new ring_buffer
1129 * @size: the size in bytes per cpu that is needed.
1130 * @flags: attributes to set for the ring buffer.
1131 *
1132 * Currently the only flag that is available is the RB_FL_OVERWRITE
1133 * flag. This flag means that the buffer will overwrite old data
1134 * when the buffer wraps. If this flag is not set, the buffer will
1135 * drop data when the tail hits the head.
1136 */
1137struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1138                                        struct lock_class_key *key)
1139{
1140        struct ring_buffer *buffer;
1141        int bsize;
1142        int cpu, nr_pages;
1143
1144        /* keep it in its own cache line */
1145        buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1146                         GFP_KERNEL);
1147        if (!buffer)
1148                return NULL;
1149
1150        if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1151                goto fail_free_buffer;
1152
1153        nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1154        buffer->flags = flags;
1155        buffer->clock = trace_clock_local;
1156        buffer->reader_lock_key = key;
1157
1158        /* need at least two pages */
1159        if (nr_pages < 2)
1160                nr_pages = 2;
1161
1162        /*
1163         * In the non-hotplug-CPU case, if the ring buffer is allocated
1164         * in an early initcall, it will not be notified of secondary
1165         * cpus. In that case, we need to allocate for all possible cpus.
1166         */
1167#ifdef CONFIG_HOTPLUG_CPU
1168        get_online_cpus();
1169        cpumask_copy(buffer->cpumask, cpu_online_mask);
1170#else
1171        cpumask_copy(buffer->cpumask, cpu_possible_mask);
1172#endif
1173        buffer->cpus = nr_cpu_ids;
1174
1175        bsize = sizeof(void *) * nr_cpu_ids;
1176        buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1177                                  GFP_KERNEL);
1178        if (!buffer->buffers)
1179                goto fail_free_cpumask;
1180
1181        for_each_buffer_cpu(buffer, cpu) {
1182                buffer->buffers[cpu] =
1183                        rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1184                if (!buffer->buffers[cpu])
1185                        goto fail_free_buffers;
1186        }
1187
1188#ifdef CONFIG_HOTPLUG_CPU
1189        buffer->cpu_notify.notifier_call = rb_cpu_notify;
1190        buffer->cpu_notify.priority = 0;
1191        register_cpu_notifier(&buffer->cpu_notify);
1192#endif
1193
1194        put_online_cpus();
1195        mutex_init(&buffer->mutex);
1196
1197        return buffer;
1198
1199 fail_free_buffers:
1200        for_each_buffer_cpu(buffer, cpu) {
1201                if (buffer->buffers[cpu])
1202                        rb_free_cpu_buffer(buffer->buffers[cpu]);
1203        }
1204        kfree(buffer->buffers);
1205
1206 fail_free_cpumask:
1207        free_cpumask_var(buffer->cpumask);
1208        put_online_cpus();
1209
1210 fail_free_buffer:
1211        kfree(buffer);
1212        return NULL;
1213}
1214EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
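/*
 * Illustrative example (added; not part of the original source): a
 * minimal producer using the allocation API above; error handling is
 * reduced to the bare minimum.
 *
 *	struct ring_buffer *buffer;
 *	struct ring_buffer_event *event;
 *
 *	buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!buffer)
 *		return -ENOMEM;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(u32));
 *	if (event) {
 *		*(u32 *)ring_buffer_event_data(event) = 42;
 *		ring_buffer_unlock_commit(buffer, event);
 *	}
 *
 *	ring_buffer_free(buffer);
 */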
1215
1216/**
1217 * ring_buffer_free - free a ring buffer.
1218 * @buffer: the buffer to free.
1219 */
1220void
1221ring_buffer_free(struct ring_buffer *buffer)
1222{
1223        int cpu;
1224
1225        get_online_cpus();
1226
1227#ifdef CONFIG_HOTPLUG_CPU
1228        unregister_cpu_notifier(&buffer->cpu_notify);
1229#endif
1230
1231        for_each_buffer_cpu(buffer, cpu)
1232                rb_free_cpu_buffer(buffer->buffers[cpu]);
1233
1234        put_online_cpus();
1235
1236        kfree(buffer->buffers);
1237        free_cpumask_var(buffer->cpumask);
1238
1239        kfree(buffer);
1240}
1241EXPORT_SYMBOL_GPL(ring_buffer_free);
1242
1243void ring_buffer_set_clock(struct ring_buffer *buffer,
1244                           u64 (*clock)(void))
1245{
1246        buffer->clock = clock;
1247}
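/*
 * Illustrative example (added; not part of the original source): any
 * function returning a u64 time stamp can be plugged in here, e.g. the
 * global trace clock declared in <linux/trace_clock.h>:
 *
 *	ring_buffer_set_clock(buffer, trace_clock_global);
 */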
1248
1249static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1250
1251static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1252{
1253        return local_read(&bpage->entries) & RB_WRITE_MASK;
1254}
1255
1256static inline unsigned long rb_page_write(struct buffer_page *bpage)
1257{
1258        return local_read(&bpage->write) & RB_WRITE_MASK;
1259}
1260
1261static int
1262rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
1263{
1264        struct list_head *tail_page, *to_remove, *next_page;
1265        struct buffer_page *to_remove_page, *tmp_iter_page;
1266        struct buffer_page *last_page, *first_page;
1267        unsigned int nr_removed;
1268        unsigned long head_bit;
1269        int page_entries;
1270
1271        head_bit = 0;
1272
1273        raw_spin_lock_irq(&cpu_buffer->reader_lock);
1274        atomic_inc(&cpu_buffer->record_disabled);
1275        /*
1276         * We don't race with the readers since we have acquired the reader
1277         * lock. We also don't race with writers after disabling recording.
1278         * This makes it easy to figure out the first and the last page to be
1279         * removed from the list. We unlink all the pages in between including
1280         * the first and last pages. This is done in a busy loop so that we
1281         * lose the least number of traces.
1282         * The pages are freed after we restart recording and unlock readers.
1283         */
1284        tail_page = &cpu_buffer->tail_page->list;
1285
1286        /*
1287         * The tail page might be on the reader page; in that case we
1288         * remove the next page from the ring buffer instead.
1289         */
1290        if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1291                tail_page = rb_list_head(tail_page->next);
1292        to_remove = tail_page;
1293
1294        /* start of pages to remove */
1295        first_page = list_entry(rb_list_head(to_remove->next),
1296                                struct buffer_page, list);
1297
1298        for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1299                to_remove = rb_list_head(to_remove)->next;
1300                head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1301        }
1302
1303        next_page = rb_list_head(to_remove)->next;
1304
1305        /*
1306         * Now we remove all pages between tail_page and next_page.
1307         * Make sure that we have head_bit value preserved for the
1308         * next page
1309         */
1310        tail_page->next = (struct list_head *)((unsigned long)next_page |
1311                                                head_bit);
1312        next_page = rb_list_head(next_page);
1313        next_page->prev = tail_page;
1314
1315        /* make sure pages points to a valid page in the ring buffer */
1316        cpu_buffer->pages = next_page;
1317
1318        /* update head page */
1319        if (head_bit)
1320                cpu_buffer->head_page = list_entry(next_page,
1321                                                struct buffer_page, list);
1322
1323        /*
1324         * change read pointer to make sure any read iterators reset
1325         * themselves
1326         */
1327        cpu_buffer->read = 0;
1328
1329        /* pages are removed, resume tracing and then free the pages */
1330        atomic_dec(&cpu_buffer->record_disabled);
1331        raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1332
1333        RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1334
1335        /* last buffer page to remove */
1336        last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1337                                list);
1338        tmp_iter_page = first_page;
1339
1340        do {
1341                to_remove_page = tmp_iter_page;
1342                rb_inc_page(cpu_buffer, &tmp_iter_page);
1343
1344                /* update the counters */
1345                page_entries = rb_page_entries(to_remove_page);
1346                if (page_entries) {
1347                        /*
1348                         * If something was added to this page, it was full
1349                         * since it is not the tail page. So we deduct the
1350                         * bytes consumed in ring buffer from here.
1351                         * Increment overrun to account for the lost events.
1352                         */
1353                        local_add(page_entries, &cpu_buffer->overrun);
1354                        local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1355                }
1356
1357                /*
1358                 * We have already removed references to this list item, just
1359                 * free up the buffer_page and its page
1360                 */
1361                free_buffer_page(to_remove_page);
1362                nr_removed--;
1363
1364        } while (to_remove_page != last_page);
1365
1366        RB_WARN_ON(cpu_buffer, nr_removed);
1367
1368        return nr_removed == 0;
1369}
1370
1371static int
1372rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1373{
1374        struct list_head *pages = &cpu_buffer->new_pages;
1375        int retries, success;
1376
1377        raw_spin_lock_irq(&cpu_buffer->reader_lock);
1378        /*
1379         * We are holding the reader lock, so the reader page won't be swapped
1380         * in the ring buffer. Now we are racing with the writer trying to
1381         * move head page and the tail page.
1382         * We are going to adapt the reader page update process where:
1383         * 1. We first splice the start and end of list of new pages between
1384         *    the head page and its previous page.
1385         * 2. We cmpxchg the prev_page->next to point from head page to the
1386         *    start of new pages list.
1387         * 3. Finally, we update the head->prev to the end of new list.
1388         *
1389         * We will try this process 10 times, to make sure that we don't keep
1390         * spinning.
1391         */
1392        retries = 10;
1393        success = 0;
1394        while (retries--) {
1395                struct list_head *head_page, *prev_page, *r;
1396                struct list_head *last_page, *first_page;
1397                struct list_head *head_page_with_bit;
1398
1399                head_page = &rb_set_head_page(cpu_buffer)->list;
1400                if (!head_page)
1401                        break;
1402                prev_page = head_page->prev;
1403
1404                first_page = pages->next;
1405                last_page  = pages->prev;
1406
1407                head_page_with_bit = (struct list_head *)
1408                                     ((unsigned long)head_page | RB_PAGE_HEAD);
1409
1410                last_page->next = head_page_with_bit;
1411                first_page->prev = prev_page;
1412
1413                r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
1414
1415                if (r == head_page_with_bit) {
1416                        /*
1417                         * yay, we replaced the page pointer with our new list;
1418                         * now we just have to update the head page's prev
1419                         * pointer to point to the end of the list
1420                         */
1421                        head_page->prev = last_page;
1422                        success = 1;
1423                        break;
1424                }
1425        }
1426
1427        if (success)
1428                INIT_LIST_HEAD(pages);
1429        /*
1430         * If we weren't successful in adding in new pages, warn and stop
1431         * tracing
1432         */
1433        RB_WARN_ON(cpu_buffer, !success);
1434        raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1435
1436        /* free pages if they weren't inserted */
1437        if (!success) {
1438                struct buffer_page *bpage, *tmp;
1439                list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1440                                         list) {
1441                        list_del_init(&bpage->list);
1442                        free_buffer_page(bpage);
1443                }
1444        }
1445        return success;
1446}
1447
1448static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1449{
1450        int success;
1451
1452        if (cpu_buffer->nr_pages_to_update > 0)
1453                success = rb_insert_pages(cpu_buffer);
1454        else
1455                success = rb_remove_pages(cpu_buffer,
1456                                        -cpu_buffer->nr_pages_to_update);
1457
1458        if (success)
1459                cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
1460}
1461
1462static void update_pages_handler(struct work_struct *work)
1463{
1464        struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1465                        struct ring_buffer_per_cpu, update_pages_work);
1466        rb_update_pages(cpu_buffer);
1467        complete(&cpu_buffer->update_done);
1468}
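/*
 * Illustrative example (added; not part of the original source) of the
 * resize interface implemented by ring_buffer_resize() below: grow
 * every per-cpu buffer to roughly 2 MB, rounded up to whole pages.
 *
 *	err = ring_buffer_resize(buffer, 2 << 20, RING_BUFFER_ALL_CPUS);
 *	if (err < 0)
 *		(allocation failed; the old size is retained)
 */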
1469
1470/**
1471 * ring_buffer_resize - resize the ring buffer
1472 * @buffer: the buffer to resize.
1473 * @size: the new size.
1474 *
1475 * Minimum size is 2 * BUF_PAGE_SIZE.
1476 *
1477 * Returns 0 on success and < 0 on failure.
1478 */
1479int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1480                        int cpu_id)
1481{
1482        struct ring_buffer_per_cpu *cpu_buffer;
1483        unsigned nr_pages;
1484        int cpu, err = 0;
1485
1486        /*
1487         * Always succeed at resizing a non-existent buffer:
1488         */
1489        if (!buffer)
1490                return size;
1491
1492        /* Make sure the requested buffer exists */
1493        if (cpu_id != RING_BUFFER_ALL_CPUS &&
1494            !cpumask_test_cpu(cpu_id, buffer->cpumask))
1495                return size;
1496
1497        size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1498        size *= BUF_PAGE_SIZE;
1499
1500        /* we need a minimum of two pages */
1501        if (size < BUF_PAGE_SIZE * 2)
1502                size = BUF_PAGE_SIZE * 2;
1503
1504        nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1505
1506        /*
1507         * Don't succeed if resizing is disabled, as a reader might be
1508         * manipulating the ring buffer and is expecting a sane state while
1509         * this is true.
1510         */
1511        if (atomic_read(&buffer->resize_disabled))
1512                return -EBUSY;
1513
1514        /* prevent another thread from changing buffer sizes */
1515        mutex_lock(&buffer->mutex);
1516
1517        if (cpu_id == RING_BUFFER_ALL_CPUS) {
1518                /* calculate the pages to update */
1519                for_each_buffer_cpu(buffer, cpu) {
1520                        cpu_buffer = buffer->buffers[cpu];
1521
1522                        cpu_buffer->nr_pages_to_update = nr_pages -
1523                                                        cpu_buffer->nr_pages;
1524                        /*
1525                         * nothing more to do for removing pages or no update
1526                         */
1527                        if (cpu_buffer->nr_pages_to_update <= 0)
1528                                continue;
1529                        /*
1530                         * to add pages, make sure all new pages can be
1531                         * allocated without receiving ENOMEM
1532                         */
1533                        INIT_LIST_HEAD(&cpu_buffer->new_pages);
1534                        if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1535                                                &cpu_buffer->new_pages, cpu)) {
1536                                /* not enough memory for new pages */
1537                                err = -ENOMEM;
1538                                goto out_err;
1539                        }
1540                }
1541
1542                get_online_cpus();
1543                /*
1544                 * Fire off all the required work handlers
1545                 * We can't schedule on offline CPUs, but it's not necessary
1546                 * since we can change their buffer sizes without any race.
1547                 */
1548                for_each_buffer_cpu(buffer, cpu) {
1549                        cpu_buffer = buffer->buffers[cpu];
1550                        if (!cpu_buffer->nr_pages_to_update)
1551                                continue;
1552
1553                        if (cpu_online(cpu))
1554                                schedule_work_on(cpu,
1555                                                &cpu_buffer->update_pages_work);
1556                        else
1557                                rb_update_pages(cpu_buffer);
1558                }
1559
1560                /* wait for all the updates to complete */
1561                for_each_buffer_cpu(buffer, cpu) {
1562                        cpu_buffer = buffer->buffers[cpu];
1563                        if (!cpu_buffer->nr_pages_to_update)
1564                                continue;
1565
1566                        if (cpu_online(cpu))
1567                                wait_for_completion(&cpu_buffer->update_done);
1568                        cpu_buffer->nr_pages_to_update = 0;
1569                }
1570
1571                put_online_cpus();
1572        } else {
1573                /* Make sure this CPU has been initialized */
1574                if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
1575                        goto out;
1576
1577                cpu_buffer = buffer->buffers[cpu_id];
1578
1579                if (nr_pages == cpu_buffer->nr_pages)
1580                        goto out;
1581
1582                cpu_buffer->nr_pages_to_update = nr_pages -
1583                                                cpu_buffer->nr_pages;
1584
1585                INIT_LIST_HEAD(&cpu_buffer->new_pages);
1586                if (cpu_buffer->nr_pages_to_update > 0 &&
1587                        __rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1588                                            &cpu_buffer->new_pages, cpu_id)) {
1589                        err = -ENOMEM;
1590                        goto out_err;
1591                }
1592
1593                get_online_cpus();
1594
1595                if (cpu_online(cpu_id)) {
1596                        schedule_work_on(cpu_id,
1597                                         &cpu_buffer->update_pages_work);
1598                        wait_for_completion(&cpu_buffer->update_done);
1599                } else
1600                        rb_update_pages(cpu_buffer);
1601
1602                cpu_buffer->nr_pages_to_update = 0;
1603                put_online_cpus();
1604        }
1605
1606 out:
1607        /*
1608         * The ring buffer resize can happen with the ring buffer
1609         * enabled, so that the update disturbs the tracing as little
1610         * as possible. But if the buffer is disabled, we do not need
1611         * to worry about that, and we can take the time to verify
1612         * that the buffer is not corrupt.
1613         */
1614        if (atomic_read(&buffer->record_disabled)) {
1615                atomic_inc(&buffer->record_disabled);
1616                /*
1617                 * Even though the buffer was disabled, we must make sure
1618                 * that it is truly disabled before calling rb_check_pages.
1619                 * There could have been a race between checking
1620                 * record_disable and incrementing it.
1621                 */
1622                synchronize_sched();
1623                for_each_buffer_cpu(buffer, cpu) {
1624                        cpu_buffer = buffer->buffers[cpu];
1625                        rb_check_pages(cpu_buffer);
1626                }
1627                atomic_dec(&buffer->record_disabled);
1628        }
1629
1630        mutex_unlock(&buffer->mutex);
1631        return size;
1632
1633 out_err:
1634        for_each_buffer_cpu(buffer, cpu) {
1635                struct buffer_page *bpage, *tmp;
1636
1637                cpu_buffer = buffer->buffers[cpu];
1638                cpu_buffer->nr_pages_to_update = 0;
1639
1640                if (list_empty(&cpu_buffer->new_pages))
1641                        continue;
1642
1643                list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1644                                        list) {
1645                        list_del_init(&bpage->list);
1646                        free_buffer_page(bpage);
1647                }
1648        }
1649        mutex_unlock(&buffer->mutex);
1650        return err;
1651}
1652EXPORT_SYMBOL_GPL(ring_buffer_resize);
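
/*
 * Illustrative usage sketch (not part of this file): resize every
 * per-cpu buffer to roughly 64 KB.  The size argument is in bytes;
 * ring_buffer_resize() rounds it up to whole pages and enforces the
 * two page minimum documented above.  The function name below is
 * made up for the example.
 */
static int rb_example_resize_all(struct ring_buffer *buffer)
{
        int ret;

        ret = ring_buffer_resize(buffer, 64 * 1024, RING_BUFFER_ALL_CPUS);
        if (ret < 0)
                return ret;     /* -EBUSY while resizing is disabled, or -ENOMEM */

        return 0;
}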
1653
1654void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1655{
1656        mutex_lock(&buffer->mutex);
1657        if (val)
1658                buffer->flags |= RB_FL_OVERWRITE;
1659        else
1660                buffer->flags &= ~RB_FL_OVERWRITE;
1661        mutex_unlock(&buffer->mutex);
1662}
1663EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
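
/*
 * Illustrative sketch (not part of this file): ring_buffer_change_overwrite()
 * takes a plain on/off value.  Passing 0 puts the buffer into
 * producer/consumer mode (new writes are dropped once the buffer fills
 * and the dropped_events counter grows); any non-zero value re-enables
 * overwriting of the oldest pages.
 */
static void rb_example_disable_overwrite(struct ring_buffer *buffer)
{
        ring_buffer_change_overwrite(buffer, 0);
}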
1664
1665static inline void *
1666__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
1667{
1668        return bpage->data + index;
1669}
1670
1671static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1672{
1673        return bpage->page->data + index;
1674}
1675
1676static inline struct ring_buffer_event *
1677rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1678{
1679        return __rb_page_index(cpu_buffer->reader_page,
1680                               cpu_buffer->reader_page->read);
1681}
1682
1683static inline struct ring_buffer_event *
1684rb_iter_head_event(struct ring_buffer_iter *iter)
1685{
1686        return __rb_page_index(iter->head_page, iter->head);
1687}
1688
1689static inline unsigned rb_page_commit(struct buffer_page *bpage)
1690{
1691        return local_read(&bpage->page->commit);
1692}
1693
1694/* Size is determined by what has been committed */
1695static inline unsigned rb_page_size(struct buffer_page *bpage)
1696{
1697        return rb_page_commit(bpage);
1698}
1699
1700static inline unsigned
1701rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1702{
1703        return rb_page_commit(cpu_buffer->commit_page);
1704}
1705
1706static inline unsigned
1707rb_event_index(struct ring_buffer_event *event)
1708{
1709        unsigned long addr = (unsigned long)event;
1710
1711        return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1712}
1713
1714static inline int
1715rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1716                   struct ring_buffer_event *event)
1717{
1718        unsigned long addr = (unsigned long)event;
1719        unsigned long index;
1720
1721        index = rb_event_index(event);
1722        addr &= PAGE_MASK;
1723
1724        return cpu_buffer->commit_page->page == (void *)addr &&
1725                rb_commit_index(cpu_buffer) == index;
1726}
1727
1728static void
1729rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1730{
1731        unsigned long max_count;
1732
1733        /*
1734         * We only race with interrupts and NMIs on this CPU.
1735         * If we own the commit event, then we can commit
1736         * all others that interrupted us, since the interruptions
1737         * are in stack format (they finish before they come
1738         * back to us). This allows us to do a simple loop to
1739         * assign the commit to the tail.
1740         */
1741 again:
1742        max_count = cpu_buffer->nr_pages * 100;
1743
1744        while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1745                if (RB_WARN_ON(cpu_buffer, !(--max_count)))
1746                        return;
1747                if (RB_WARN_ON(cpu_buffer,
1748                               rb_is_reader_page(cpu_buffer->tail_page)))
1749                        return;
1750                local_set(&cpu_buffer->commit_page->page->commit,
1751                          rb_page_write(cpu_buffer->commit_page));
1752                rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1753                cpu_buffer->write_stamp =
1754                        cpu_buffer->commit_page->page->time_stamp;
1755                /* add barrier to keep gcc from optimizing too much */
1756                barrier();
1757        }
1758        while (rb_commit_index(cpu_buffer) !=
1759               rb_page_write(cpu_buffer->commit_page)) {
1760
1761                local_set(&cpu_buffer->commit_page->page->commit,
1762                          rb_page_write(cpu_buffer->commit_page));
1763                RB_WARN_ON(cpu_buffer,
1764                           local_read(&cpu_buffer->commit_page->page->commit) &
1765                           ~RB_WRITE_MASK);
1766                barrier();
1767        }
1768
1769        /* again, keep gcc from optimizing */
1770        barrier();
1771
1772        /*
1773         * If an interrupt came in just after the first while loop
1774         * and pushed the tail page forward, we will be left with
1775         * a dangling commit that will never go forward.
1776         */
1777        if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1778                goto again;
1779}
1780
1781static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1782{
1783        cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1784        cpu_buffer->reader_page->read = 0;
1785}
1786
1787static void rb_inc_iter(struct ring_buffer_iter *iter)
1788{
1789        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1790
1791        /*
1792         * The iterator could be on the reader page (it starts there).
1793         * But the head could have moved, since the reader was
1794         * found. Check for this case and assign the iterator
1795         * to the head page instead of next.
1796         */
1797        if (iter->head_page == cpu_buffer->reader_page)
1798                iter->head_page = rb_set_head_page(cpu_buffer);
1799        else
1800                rb_inc_page(cpu_buffer, &iter->head_page);
1801
1802        iter->read_stamp = iter->head_page->page->time_stamp;
1803        iter->head = 0;
1804}
1805
1806/* Slow path, do not inline */
1807static noinline struct ring_buffer_event *
1808rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
1809{
1810        event->type_len = RINGBUF_TYPE_TIME_EXTEND;
1811
1812        /* Not the first event on the page? */
1813        if (rb_event_index(event)) {
1814                event->time_delta = delta & TS_MASK;
1815                event->array[0] = delta >> TS_SHIFT;
1816        } else {
1817                /* nope, just zero it */
1818                event->time_delta = 0;
1819                event->array[0] = 0;
1820        }
1821
1822        return skip_time_extend(event);
1823}
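
/*
 * Illustrative sketch (not part of this file): how a TIME_EXTEND event
 * written above would be decoded again.  The 59-bit delta is split
 * across the two fields: the low TS_SHIFT bits live in time_delta and
 * the rest in array[0], which is exactly how rb_update_write_stamp()
 * below reassembles it.
 */
static inline u64 rb_example_extended_delta(struct ring_buffer_event *event)
{
        u64 delta = event->array[0];

        delta <<= TS_SHIFT;
        delta += event->time_delta;

        return delta;
}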
1824
1825/**
1826 * rb_update_event - update event type and data
1827 * @event: the event to update
1828 * @type: the type of event
1829 * @length: the size of the event field in the ring buffer
1830 *
1831 * Update the type and data fields of the event. The length
1832 * is the actual size that is written to the ring buffer,
1833 * and with this, we can determine what to place into the
1834 * data field.
1835 */
1836static void
1837rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
1838                struct ring_buffer_event *event, unsigned length,
1839                int add_timestamp, u64 delta)
1840{
1841        /* Only a commit updates the timestamp */
1842        if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
1843                delta = 0;
1844
1845        /*
1846         * If we need to add a timestamp, then we
1847         * add it to the start of the reserved space.
1848         */
1849        if (unlikely(add_timestamp)) {
1850                event = rb_add_time_stamp(event, delta);
1851                length -= RB_LEN_TIME_EXTEND;
1852                delta = 0;
1853        }
1854
1855        event->time_delta = delta;
1856        length -= RB_EVNT_HDR_SIZE;
1857        if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
1858                event->type_len = 0;
1859                event->array[0] = length;
1860        } else
1861                event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
1862}
1863
1864/*
1865 * rb_handle_head_page - writer hit the head page
1866 *
1867 * Returns: +1 to retry page
1868 *           0 to continue
1869 *          -1 on error
1870 */
1871static int
1872rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1873                    struct buffer_page *tail_page,
1874                    struct buffer_page *next_page)
1875{
1876        struct buffer_page *new_head;
1877        int entries;
1878        int type;
1879        int ret;
1880
1881        entries = rb_page_entries(next_page);
1882
1883        /*
1884         * The hard part is here. We need to move the head
1885         * forward, and protect against both readers on
1886         * other CPUs and writers coming in via interrupts.
1887         */
1888        type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
1889                                       RB_PAGE_HEAD);
1890
1891        /*
1892         * type can be one of four:
1893         *  NORMAL - an interrupt already moved it for us
1894         *  HEAD   - we are the first to get here.
1895         *  UPDATE - we are the interrupt interrupting
1896         *           a current move.
1897         *  MOVED  - a reader on another CPU moved the next
1898         *           pointer to its reader page. Give up
1899         *           and try again.
1900         */
1901
1902        switch (type) {
1903        case RB_PAGE_HEAD:
1904                /*
1905                 * We changed the head to UPDATE, thus
1906                 * it is our responsibility to update
1907                 * the counters.
1908                 */
1909                local_add(entries, &cpu_buffer->overrun);
1910                local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1911
1912                /*
1913                 * The entries will be zeroed out when we move the
1914                 * tail page.
1915                 */
1916
1917                /* still more to do */
1918                break;
1919
1920        case RB_PAGE_UPDATE:
1921                /*
1922                 * This is an interrupt that interrupted the
1923                 * previous update. Still more to do.
1924                 */
1925                break;
1926        case RB_PAGE_NORMAL:
1927                /*
1928                 * An interrupt came in before the update
1929                 * and processed this for us.
1930                 * Nothing left to do.
1931                 */
1932                return 1;
1933        case RB_PAGE_MOVED:
1934                /*
1935                 * The reader is on another CPU and just did
1936                 * a swap with our next_page.
1937                 * Try again.
1938                 */
1939                return 1;
1940        default:
1941                RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
1942                return -1;
1943        }
1944
1945        /*
1946         * Now that we are here, the old head pointer is
1947         * set to UPDATE. This will keep the reader from
1948         * swapping the head page with the reader page.
1949         * The reader (on another CPU) will spin till
1950         * we are finished.
1951         *
1952         * We just need to protect against interrupts
1953         * doing the job. We will set the next pointer
1954         * to HEAD. After that, we set the old pointer
1955         * to NORMAL, but only if it was HEAD before.
1956         * Otherwise we are an interrupt, and only
1957         * want the outermost commit to reset it.
1958         */
1959        new_head = next_page;
1960        rb_inc_page(cpu_buffer, &new_head);
1961
1962        ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
1963                                    RB_PAGE_NORMAL);
1964
1965        /*
1966         * Valid returns are:
1967         *  HEAD   - an interrupt came in and already set it.
1968         *  NORMAL - One of two things:
1969         *            1) We really set it.
1970         *            2) A bunch of interrupts came in and moved
1971         *               the page forward again.
1972         */
1973        switch (ret) {
1974        case RB_PAGE_HEAD:
1975        case RB_PAGE_NORMAL:
1976                /* OK */
1977                break;
1978        default:
1979                RB_WARN_ON(cpu_buffer, 1);
1980                return -1;
1981        }
1982
1983        /*
1984         * It is possible that an interrupt came in,
1985         * set the head up, then more interrupts came in
1986         * and moved it again. When we get back here,
1987         * the page would have been set to NORMAL but we
1988         * just set it back to HEAD.
1989         *
1990         * How do you detect this? Well, if that happened
1991         * the tail page would have moved.
1992         */
1993        if (ret == RB_PAGE_NORMAL) {
1994                /*
1995                 * If the tail had moved past next, then we need
1996                 * to reset the pointer.
1997                 */
1998                if (cpu_buffer->tail_page != tail_page &&
1999                    cpu_buffer->tail_page != next_page)
2000                        rb_head_page_set_normal(cpu_buffer, new_head,
2001                                                next_page,
2002                                                RB_PAGE_HEAD);
2003        }
2004
2005        /*
2006         * If this was the outermost commit (the one that
2007         * changed the original pointer from HEAD to UPDATE),
2008         * then it is up to us to reset it to NORMAL.
2009         */
2010        if (type == RB_PAGE_HEAD) {
2011                ret = rb_head_page_set_normal(cpu_buffer, next_page,
2012                                              tail_page,
2013                                              RB_PAGE_UPDATE);
2014                if (RB_WARN_ON(cpu_buffer,
2015                               ret != RB_PAGE_UPDATE))
2016                        return -1;
2017        }
2018
2019        return 0;
2020}
2021
2022static unsigned rb_calculate_event_length(unsigned length)
2023{
2024        struct ring_buffer_event event; /* Used only for sizeof array */
2025
2026        /* zero length can cause confusion */
2027        if (!length)
2028                length = 1;
2029
2030        if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2031                length += sizeof(event.array[0]);
2032
2033        length += RB_EVNT_HDR_SIZE;
2034        length = ALIGN(length, RB_ARCH_ALIGNMENT);
2035
2036        return length;
2037}
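
/*
 * Worked example (illustrative, assuming RB_EVNT_HDR_SIZE is 4 and a
 * 4 byte arch alignment): a request for 5 bytes of data becomes
 * 5 + 4 = 9 and is rounded up to 12 bytes of reserved space, with the
 * length encoded in type_len.  A request larger than RB_MAX_SMALL_DATA
 * additionally reserves sizeof(event.array[0]) bytes so the length can
 * be stored in array[0] instead.
 */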
2038
2039static inline void
2040rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2041              struct buffer_page *tail_page,
2042              unsigned long tail, unsigned long length)
2043{
2044        struct ring_buffer_event *event;
2045
2046        /*
2047         * Only the event that crossed the page boundary
2048         * must fill the old tail_page with padding.
2049         */
2050        if (tail >= BUF_PAGE_SIZE) {
2051                /*
2052                 * If the page was filled, then we still need
2053                 * to update the real_end. Reset it to zero
2054                 * and the reader will ignore it.
2055                 */
2056                if (tail == BUF_PAGE_SIZE)
2057                        tail_page->real_end = 0;
2058
2059                local_sub(length, &tail_page->write);
2060                return;
2061        }
2062
2063        event = __rb_page_index(tail_page, tail);
2064        kmemcheck_annotate_bitfield(event, bitfield);
2065
2066        /* account for padding bytes */
2067        local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2068
2069        /*
2070         * Save the original length to the meta data.
2071         * This will be used by the reader to add lost event
2072         * counter.
2073         */
2074        tail_page->real_end = tail;
2075
2076        /*
2077         * If this event is bigger than the minimum size, then
2078         * we need to be careful that we don't subtract the
2079         * write counter enough to allow another writer to slip
2080         * in on this page.
2081         * We put in a discarded commit instead, to make sure
2082         * that this space is not used again.
2083         *
2084         * If we are less than the minimum size, we don't need to
2085         * worry about it.
2086         */
2087        if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2088                /* No room for any events */
2089
2090                /* Mark the rest of the page with padding */
2091                rb_event_set_padding(event);
2092
2093                /* Set the write back to the previous setting */
2094                local_sub(length, &tail_page->write);
2095                return;
2096        }
2097
2098        /* Put in a discarded event */
2099        event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2100        event->type_len = RINGBUF_TYPE_PADDING;
2101        /* time delta must be non zero */
2102        event->time_delta = 1;
2103
2104        /* Set write to end of buffer */
2105        length = (tail + length) - BUF_PAGE_SIZE;
2106        local_sub(length, &tail_page->write);
2107}
2108
2109/*
2110 * This is the slow path, force gcc not to inline it.
2111 */
2112static noinline struct ring_buffer_event *
2113rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2114             unsigned long length, unsigned long tail,
2115             struct buffer_page *tail_page, u64 ts)
2116{
2117        struct buffer_page *commit_page = cpu_buffer->commit_page;
2118        struct ring_buffer *buffer = cpu_buffer->buffer;
2119        struct buffer_page *next_page;
2120        int ret;
2121
2122        next_page = tail_page;
2123
2124        rb_inc_page(cpu_buffer, &next_page);
2125
2126        /*
2127         * If for some reason, we had an interrupt storm that made
2128         * it all the way around the buffer, bail, and warn
2129         * about it.
2130         */
2131        if (unlikely(next_page == commit_page)) {
2132                local_inc(&cpu_buffer->commit_overrun);
2133                goto out_reset;
2134        }
2135
2136        /*
2137         * This is where the fun begins!
2138         *
2139         * We are fighting against races between a reader that
2140         * could be on another CPU trying to swap its reader
2141         * page with the buffer head.
2142         *
2143         * We are also fighting against interrupts coming in and
2144         * moving the head or tail on us as well.
2145         *
2146         * If the next page is the head page then we have filled
2147         * the buffer, unless the commit page is still on the
2148         * reader page.
2149         */
2150        if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2151
2152                /*
2153                 * If the commit is not on the reader page, then
2154                 * move the header page.
2155                 */
2156                if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2157                        /*
2158                         * If we are not in overwrite mode,
2159                         * this is easy, just stop here.
2160                         */
2161                        if (!(buffer->flags & RB_FL_OVERWRITE)) {
2162                                local_inc(&cpu_buffer->dropped_events);
2163                                goto out_reset;
2164                        }
2165
2166                        ret = rb_handle_head_page(cpu_buffer,
2167                                                  tail_page,
2168                                                  next_page);
2169                        if (ret < 0)
2170                                goto out_reset;
2171                        if (ret)
2172                                goto out_again;
2173                } else {
2174                        /*
2175                         * We need to be careful here too. The
2176                         * commit page could still be on the reader
2177                         * page. We could have a small buffer, and
2178                         * have filled up the buffer with events
2179                         * from interrupts and such, and wrapped.
2180                         *
2181                         * Note, if the tail page is also on the
2182                         * reader_page, we let it move out.
2183                         */
2184                        if (unlikely((cpu_buffer->commit_page !=
2185                                      cpu_buffer->tail_page) &&
2186                                     (cpu_buffer->commit_page ==
2187                                      cpu_buffer->reader_page))) {
2188                                local_inc(&cpu_buffer->commit_overrun);
2189                                goto out_reset;
2190                        }
2191                }
2192        }
2193
2194        ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
2195        if (ret) {
2196                /*
2197                 * Nested commits always have zero deltas, so
2198                 * just reread the time stamp
2199                 */
2200                ts = rb_time_stamp(buffer);
2201                next_page->page->time_stamp = ts;
2202        }
2203
2204 out_again:
2205
2206        rb_reset_tail(cpu_buffer, tail_page, tail, length);
2207
2208        /* fail and let the caller try again */
2209        return ERR_PTR(-EAGAIN);
2210
2211 out_reset:
2212        /* reset write */
2213        rb_reset_tail(cpu_buffer, tail_page, tail, length);
2214
2215        return NULL;
2216}
2217
2218static struct ring_buffer_event *
2219__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2220                  unsigned long length, u64 ts,
2221                  u64 delta, int add_timestamp)
2222{
2223        struct buffer_page *tail_page;
2224        struct ring_buffer_event *event;
2225        unsigned long tail, write;
2226
2227        /*
2228         * If the time delta since the last event is too big to
2229         * hold in the time field of the event, then we append a
2230         * TIME EXTEND event ahead of the data event.
2231         */
2232        if (unlikely(add_timestamp))
2233                length += RB_LEN_TIME_EXTEND;
2234
2235        tail_page = cpu_buffer->tail_page;
2236        write = local_add_return(length, &tail_page->write);
2237
2238        /* set write to only the index of the write */
2239        write &= RB_WRITE_MASK;
2240        tail = write - length;
2241
2242        /* See if we shot past the end of this buffer page */
2243        if (unlikely(write > BUF_PAGE_SIZE))
2244                return rb_move_tail(cpu_buffer, length, tail,
2245                                    tail_page, ts);
2246
2247        /* We reserved something on the buffer */
2248
2249        event = __rb_page_index(tail_page, tail);
2250        kmemcheck_annotate_bitfield(event, bitfield);
2251        rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
2252
2253        local_inc(&tail_page->entries);
2254
2255        /*
2256         * If this is the first commit on the page, then update
2257         * its timestamp.
2258         */
2259        if (!tail)
2260                tail_page->page->time_stamp = ts;
2261
2262        /* account for these added bytes */
2263        local_add(length, &cpu_buffer->entries_bytes);
2264
2265        return event;
2266}
2267
2268static inline int
2269rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2270                  struct ring_buffer_event *event)
2271{
2272        unsigned long new_index, old_index;
2273        struct buffer_page *bpage;
2274        unsigned long index;
2275        unsigned long addr;
2276
2277        new_index = rb_event_index(event);
2278        old_index = new_index + rb_event_ts_length(event);
2279        addr = (unsigned long)event;
2280        addr &= PAGE_MASK;
2281
2282        bpage = cpu_buffer->tail_page;
2283
2284        if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2285                unsigned long write_mask =
2286                        local_read(&bpage->write) & ~RB_WRITE_MASK;
2287                unsigned long event_length = rb_event_length(event);
2288                /*
2289                 * This is on the tail page. It is possible that
2290                 * a write could come in and move the tail page
2291                 * and write to the next page. That is fine
2292                 * because we just shorten what is on this page.
2293                 */
2294                old_index += write_mask;
2295                new_index += write_mask;
2296                index = local_cmpxchg(&bpage->write, old_index, new_index);
2297                if (index == old_index) {
2298                        /* update counters */
2299                        local_sub(event_length, &cpu_buffer->entries_bytes);
2300                        return 1;
2301                }
2302        }
2303
2304        /* could not discard */
2305        return 0;
2306}
2307
2308static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2309{
2310        local_inc(&cpu_buffer->committing);
2311        local_inc(&cpu_buffer->commits);
2312}
2313
2314static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2315{
2316        unsigned long commits;
2317
2318        if (RB_WARN_ON(cpu_buffer,
2319                       !local_read(&cpu_buffer->committing)))
2320                return;
2321
2322 again:
2323        commits = local_read(&cpu_buffer->commits);
2324        /* synchronize with interrupts */
2325        barrier();
2326        if (local_read(&cpu_buffer->committing) == 1)
2327                rb_set_commit_to_write(cpu_buffer);
2328
2329        local_dec(&cpu_buffer->committing);
2330
2331        /* synchronize with interrupts */
2332        barrier();
2333
2334        /*
2335         * Need to account for interrupts coming in between the
2336         * updating of the commit page and the clearing of the
2337         * committing counter.
2338         */
2339        if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2340            !local_read(&cpu_buffer->committing)) {
2341                local_inc(&cpu_buffer->committing);
2342                goto again;
2343        }
2344}
2345
2346static struct ring_buffer_event *
2347rb_reserve_next_event(struct ring_buffer *buffer,
2348                      struct ring_buffer_per_cpu *cpu_buffer,
2349                      unsigned long length)
2350{
2351        struct ring_buffer_event *event;
2352        u64 ts, delta;
2353        int nr_loops = 0;
2354        int add_timestamp;
2355        u64 diff;
2356
2357        rb_start_commit(cpu_buffer);
2358
2359#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2360        /*
2361         * Due to the ability to swap a cpu buffer from a buffer
2362         * it is possible it was swapped before we committed.
2363         * (committing stops a swap). We check for it here and
2364         * if it happened, we have to fail the write.
2365         */
2366        barrier();
2367        if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2368                local_dec(&cpu_buffer->committing);
2369                local_dec(&cpu_buffer->commits);
2370                return NULL;
2371        }
2372#endif
2373
2374        length = rb_calculate_event_length(length);
2375 again:
2376        add_timestamp = 0;
2377        delta = 0;
2378
2379        /*
2380         * We allow for interrupts to reenter here and do a trace.
2381         * If one does, it will cause this original code to loop
2382         * back here. Even with heavy interrupts happening, this
2383         * should only happen a few times in a row. If this happens
2384         * 1000 times in a row, there must be either an interrupt
2385         * storm or we have something buggy.
2386         * Bail!
2387         */
2388        if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2389                goto out_fail;
2390
2391        ts = rb_time_stamp(cpu_buffer->buffer);
2392        diff = ts - cpu_buffer->write_stamp;
2393
2394        /* make sure this diff is calculated here */
2395        barrier();
2396
2397        /* Did the write stamp get updated already? */
2398        if (likely(ts >= cpu_buffer->write_stamp)) {
2399                delta = diff;
2400                if (unlikely(test_time_stamp(delta))) {
2401                        int local_clock_stable = 1;
2402#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2403                        local_clock_stable = sched_clock_stable;
2404#endif
2405                        WARN_ONCE(delta > (1ULL << 59),
2406                                  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2407                                  (unsigned long long)delta,
2408                                  (unsigned long long)ts,
2409                                  (unsigned long long)cpu_buffer->write_stamp,
2410                                  local_clock_stable ? "" :
2411                                  "If you just came from a suspend/resume,\n"
2412                                  "please switch to the trace global clock:\n"
2413                                  "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
2414                        add_timestamp = 1;
2415                }
2416        }
2417
2418        event = __rb_reserve_next(cpu_buffer, length, ts,
2419                                  delta, add_timestamp);
2420        if (unlikely(PTR_ERR(event) == -EAGAIN))
2421                goto again;
2422
2423        if (!event)
2424                goto out_fail;
2425
2426        return event;
2427
2428 out_fail:
2429        rb_end_commit(cpu_buffer);
2430        return NULL;
2431}
2432
2433#ifdef CONFIG_TRACING
2434
2435#define TRACE_RECURSIVE_DEPTH 16
2436
2437/* Keep this code out of the fast path cache */
2438static noinline void trace_recursive_fail(void)
2439{
2440        /* Disable all tracing before we do anything else */
2441        tracing_off_permanent();
2442
2443        printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
2444                    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
2445                    trace_recursion_buffer(),
2446                    hardirq_count() >> HARDIRQ_SHIFT,
2447                    softirq_count() >> SOFTIRQ_SHIFT,
2448                    in_nmi());
2449
2450        WARN_ON_ONCE(1);
2451}
2452
2453static inline int trace_recursive_lock(void)
2454{
2455        trace_recursion_inc();
2456
2457        if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH))
2458                return 0;
2459
2460        trace_recursive_fail();
2461
2462        return -1;
2463}
2464
2465static inline void trace_recursive_unlock(void)
2466{
2467        WARN_ON_ONCE(!trace_recursion_buffer());
2468
2469        trace_recursion_dec();
2470}
2471
2472#else
2473
2474#define trace_recursive_lock()          (0)
2475#define trace_recursive_unlock()        do { } while (0)
2476
2477#endif
2478
2479/**
2480 * ring_buffer_lock_reserve - reserve a part of the buffer
2481 * @buffer: the ring buffer to reserve from
2482 * @length: the length of the data to reserve (excluding event header)
2483 *
2484 * Returns a reserved event on the ring buffer to copy data directly into.
2485 * The user of this interface will need to get the body to write into
2486 * and can use the ring_buffer_event_data() interface.
2487 *
2488 * The length is the length of the data needed, not the event length
2489 * which also includes the event header.
2490 *
2491 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2492 * If NULL is returned, then nothing has been allocated or locked.
2493 */
2494struct ring_buffer_event *
2495ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2496{
2497        struct ring_buffer_per_cpu *cpu_buffer;
2498        struct ring_buffer_event *event;
2499        int cpu;
2500
2501        if (ring_buffer_flags != RB_BUFFERS_ON)
2502                return NULL;
2503
2504        /* If we are tracing schedule, we don't want to recurse */
2505        preempt_disable_notrace();
2506
2507        if (atomic_read(&buffer->record_disabled))
2508                goto out_nocheck;
2509
2510        if (trace_recursive_lock())
2511                goto out_nocheck;
2512
2513        cpu = raw_smp_processor_id();
2514
2515        if (!cpumask_test_cpu(cpu, buffer->cpumask))
2516                goto out;
2517
2518        cpu_buffer = buffer->buffers[cpu];
2519
2520        if (atomic_read(&cpu_buffer->record_disabled))
2521                goto out;
2522
2523        if (length > BUF_MAX_DATA_SIZE)
2524                goto out;
2525
2526        event = rb_reserve_next_event(buffer, cpu_buffer, length);
2527        if (!event)
2528                goto out;
2529
2530        return event;
2531
2532 out:
2533        trace_recursive_unlock();
2534
2535 out_nocheck:
2536        preempt_enable_notrace();
2537        return NULL;
2538}
2539EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
2540
2541static void
2542rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2543                      struct ring_buffer_event *event)
2544{
2545        u64 delta;
2546
2547        /*
2548         * The first event in the commit queue updates the
2549         * time stamp.
2550         */
2551        if (rb_event_is_commit(cpu_buffer, event)) {
2552                /*
2553                 * A commit event that is first on a page
2554                 * updates the write timestamp with the page stamp
2555                 */
2556                if (!rb_event_index(event))
2557                        cpu_buffer->write_stamp =
2558                                cpu_buffer->commit_page->page->time_stamp;
2559                else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2560                        delta = event->array[0];
2561                        delta <<= TS_SHIFT;
2562                        delta += event->time_delta;
2563                        cpu_buffer->write_stamp += delta;
2564                } else
2565                        cpu_buffer->write_stamp += event->time_delta;
2566        }
2567}
2568
2569static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2570                      struct ring_buffer_event *event)
2571{
2572        local_inc(&cpu_buffer->entries);
2573        rb_update_write_stamp(cpu_buffer, event);
2574        rb_end_commit(cpu_buffer);
2575}
2576
2577/**
2578 * ring_buffer_unlock_commit - commit a reserved event
2579 * @buffer: The buffer to commit to
2580 * @event: The event pointer to commit.
2581 *
2582 * This commits the data to the ring buffer, and releases any locks held.
2583 *
2584 * Must be paired with ring_buffer_lock_reserve.
2585 */
2586int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2587                              struct ring_buffer_event *event)
2588{
2589        struct ring_buffer_per_cpu *cpu_buffer;
2590        int cpu = raw_smp_processor_id();
2591
2592        cpu_buffer = buffer->buffers[cpu];
2593
2594        rb_commit(cpu_buffer, event);
2595
2596        trace_recursive_unlock();
2597
2598        preempt_enable_notrace();
2599
2600        return 0;
2601}
2602EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
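
/*
 * Usage sketch (illustrative only): the reserve/commit pair as seen
 * from a writer.  "struct rb_example_record" and the function name are
 * made up for the example; ring_buffer_event_data() returns the body
 * that was just reserved.  Note that preemption stays disabled between
 * the two calls, so the work in between must be short.
 */
struct rb_example_record {
        int     cpu;
        u64     value;
};

static int rb_example_reserve_and_commit(struct ring_buffer *buffer, u64 value)
{
        struct ring_buffer_event *event;
        struct rb_example_record *rec;

        event = ring_buffer_lock_reserve(buffer, sizeof(*rec));
        if (!event)
                return -EBUSY;

        rec = ring_buffer_event_data(event);
        rec->cpu = raw_smp_processor_id();
        rec->value = value;

        return ring_buffer_unlock_commit(buffer, event);
}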
2603
2604static inline void rb_event_discard(struct ring_buffer_event *event)
2605{
2606        if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
2607                event = skip_time_extend(event);
2608
2609        /* array[0] holds the actual length for the discarded event */
2610        event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2611        event->type_len = RINGBUF_TYPE_PADDING;
2612        /* time delta must be non zero */
2613        if (!event->time_delta)
2614                event->time_delta = 1;
2615}
2616
2617/*
2618 * Decrement the entries to the page that an event is on.
2619 * The event does not even need to exist, only the pointer
2620 * to the page it is on. This may only be called before the commit
2621 * takes place.
2622 */
2623static inline void
2624rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2625                   struct ring_buffer_event *event)
2626{
2627        unsigned long addr = (unsigned long)event;
2628        struct buffer_page *bpage = cpu_buffer->commit_page;
2629        struct buffer_page *start;
2630
2631        addr &= PAGE_MASK;
2632
2633        /* Do the likely case first */
2634        if (likely(bpage->page == (void *)addr)) {
2635                local_dec(&bpage->entries);
2636                return;
2637        }
2638
2639        /*
2640         * Because the commit page may be on the reader page we
2641         * start with the next page and end the loop when we get back to it.
2642         */
2643        rb_inc_page(cpu_buffer, &bpage);
2644        start = bpage;
2645        do {
2646                if (bpage->page == (void *)addr) {
2647                        local_dec(&bpage->entries);
2648                        return;
2649                }
2650                rb_inc_page(cpu_buffer, &bpage);
2651        } while (bpage != start);
2652
2653        /* commit not part of this buffer?? */
2654        RB_WARN_ON(cpu_buffer, 1);
2655}
2656
2657/**
2658 * ring_buffer_commit_discard - discard an event that has not been committed
2659 * @buffer: the ring buffer
2660 * @event: non committed event to discard
2661 *
2662 * Sometimes an event that is in the ring buffer needs to be ignored.
2663 * This function lets the user discard an event in the ring buffer
2664 * so that it will not be read later.
2665 *
2666 * This function only works if it is called before the item has been
2667 * committed. It will try to free the event from the ring buffer
2668 * if another event has not been added behind it.
2669 *
2670 * If another event has been added behind it, it will set the event
2671 * up as discarded, and perform the commit.
2672 *
2673 * If this function is called, do not call ring_buffer_unlock_commit on
2674 * the event.
2675 */
2676void ring_buffer_discard_commit(struct ring_buffer *buffer,
2677                                struct ring_buffer_event *event)
2678{
2679        struct ring_buffer_per_cpu *cpu_buffer;
2680        int cpu;
2681
2682        /* The event is discarded regardless */
2683        rb_event_discard(event);
2684
2685        cpu = smp_processor_id();
2686        cpu_buffer = buffer->buffers[cpu];
2687
2688        /*
2689         * This must only be called if the event has not been
2690         * committed yet. Thus we can assume that preemption
2691         * is still disabled.
2692         */
2693        RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2694
2695        rb_decrement_entry(cpu_buffer, event);
2696        if (rb_try_to_discard(cpu_buffer, event))
2697                goto out;
2698
2699        /*
2700         * The commit is still visible to the reader, so we
2701         * must still update the timestamp.
2702         */
2703        rb_update_write_stamp(cpu_buffer, event);
2704 out:
2705        rb_end_commit(cpu_buffer);
2706
2707        trace_recursive_unlock();
2708
2709        preempt_enable_notrace();
2710
2711}
2712EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
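
/*
 * Usage sketch (illustrative only): reserving space and then deciding
 * not to keep the event.  After ring_buffer_discard_commit() the event
 * must not also be passed to ring_buffer_unlock_commit(), as the
 * kernel-doc above states.
 */
static void rb_example_reserve_then_discard(struct ring_buffer *buffer)
{
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(buffer, sizeof(int));
        if (!event)
                return;

        /* ... some later check decides this record is unwanted ... */
        ring_buffer_discard_commit(buffer, event);
}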
2713
2714/**
2715 * ring_buffer_write - write data to the buffer without reserving
2716 * @buffer: The ring buffer to write to.
2717 * @length: The length of the data being written (excluding the event header)
2718 * @data: The data to write to the buffer.
2719 *
2720 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2721 * one function. If you already have the data to write to the buffer, it
2722 * may be easier to simply call this function.
2723 *
2724 * Note, like ring_buffer_lock_reserve, the length is the length of the data
2725 * and not the length of the event which would hold the header.
2726 */
2727int ring_buffer_write(struct ring_buffer *buffer,
2728                      unsigned long length,
2729                      void *data)
2730{
2731        struct ring_buffer_per_cpu *cpu_buffer;
2732        struct ring_buffer_event *event;
2733        void *body;
2734        int ret = -EBUSY;
2735        int cpu;
2736
2737        if (ring_buffer_flags != RB_BUFFERS_ON)
2738                return -EBUSY;
2739
2740        preempt_disable_notrace();
2741
2742        if (atomic_read(&buffer->record_disabled))
2743                goto out;
2744
2745        cpu = raw_smp_processor_id();
2746
2747        if (!cpumask_test_cpu(cpu, buffer->cpumask))
2748                goto out;
2749
2750        cpu_buffer = buffer->buffers[cpu];
2751
2752        if (atomic_read(&cpu_buffer->record_disabled))
2753                goto out;
2754
2755        if (length > BUF_MAX_DATA_SIZE)
2756                goto out;
2757
2758        event = rb_reserve_next_event(buffer, cpu_buffer, length);
2759        if (!event)
2760                goto out;
2761
2762        body = rb_event_data(event);
2763
2764        memcpy(body, data, length);
2765
2766        rb_commit(cpu_buffer, event);
2767
2768        ret = 0;
2769 out:
2770        preempt_enable_notrace();
2771
2772        return ret;
2773}
2774EXPORT_SYMBOL_GPL(ring_buffer_write);
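
/*
 * Usage sketch (illustrative only): when the payload already exists in
 * memory, ring_buffer_write() replaces the reserve/copy/commit sequence
 * with a single call.  It returns 0 on success or -EBUSY if the data
 * could not be written.
 */
static int rb_example_write_blob(struct ring_buffer *buffer,
                                 void *blob, unsigned long len)
{
        return ring_buffer_write(buffer, len, blob);
}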
2775
2776static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
2777{
2778        struct buffer_page *reader = cpu_buffer->reader_page;
2779        struct buffer_page *head = rb_set_head_page(cpu_buffer);
2780        struct buffer_page *commit = cpu_buffer->commit_page;
2781
2782        /* In case of error, head will be NULL */
2783        if (unlikely(!head))
2784                return 1;
2785
2786        return reader->read == rb_page_commit(reader) &&
2787                (commit == reader ||
2788                 (commit == head &&
2789                  head->read == rb_page_commit(commit)));
2790}
2791
2792/**
2793 * ring_buffer_record_disable - stop all writes into the buffer
2794 * @buffer: The ring buffer to stop writes to.
2795 *
2796 * This prevents all writes to the buffer. Any attempt to write
2797 * to the buffer after this will fail and return NULL.
2798 *
2799 * The caller should call synchronize_sched() after this.
2800 */
2801void ring_buffer_record_disable(struct ring_buffer *buffer)
2802{
2803        atomic_inc(&buffer->record_disabled);
2804}
2805EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
2806
2807/**
2808 * ring_buffer_record_enable - enable writes to the buffer
2809 * @buffer: The ring buffer to enable writes
2810 *
2811 * Note, multiple disables will need the same number of enables
2812 * to truly enable the writing (much like preempt_disable).
2813 */
2814void ring_buffer_record_enable(struct ring_buffer *buffer)
2815{
2816        atomic_dec(&buffer->record_disabled);
2817}
2818EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
2819
2820/**
2821 * ring_buffer_record_off - stop all writes into the buffer
2822 * @buffer: The ring buffer to stop writes to.
2823 *
2824 * This prevents all writes to the buffer. Any attempt to write
2825 * to the buffer after this will fail and return NULL.
2826 *
2827 * This is different than ring_buffer_record_disable() as
2828 * it works like an on/off switch, whereas the disable() version
2829 * must be paired with an enable().
2830 */
2831void ring_buffer_record_off(struct ring_buffer *buffer)
2832{
2833        unsigned int rd;
2834        unsigned int new_rd;
2835
2836        do {
2837                rd = atomic_read(&buffer->record_disabled);
2838                new_rd = rd | RB_BUFFER_OFF;
2839        } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
2840}
2841EXPORT_SYMBOL_GPL(ring_buffer_record_off);
2842
2843/**
2844 * ring_buffer_record_on - restart writes into the buffer
2845 * @buffer: The ring buffer to start writes to.
2846 *
2847 * This enables all writes to the buffer that was disabled by
2848 * ring_buffer_record_off().
2849 *
2850 * This is different than ring_buffer_record_enable() as
2851 * it works like an on/off switch, whereas the enable() version
2852 * must be paired with a disable().
2853 */
2854void ring_buffer_record_on(struct ring_buffer *buffer)
2855{
2856        unsigned int rd;
2857        unsigned int new_rd;
2858
2859        do {
2860                rd = atomic_read(&buffer->record_disabled);
2861                new_rd = rd & ~RB_BUFFER_OFF;
2862        } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
2863}
2864EXPORT_SYMBOL_GPL(ring_buffer_record_on);
2865
2866/**
2867 * ring_buffer_record_is_on - return true if the ring buffer can write
2868 * @buffer: The ring buffer to see if write is enabled
2869 *
2870 * Returns true if the ring buffer is in a state that it accepts writes.
2871 */
2872int ring_buffer_record_is_on(struct ring_buffer *buffer)
2873{
2874        return !atomic_read(&buffer->record_disabled);
2875}
2876
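/*
 * Usage sketch (illustrative only): the off/on switch pair.  Unlike the
 * disable()/enable() calls above, these behave as a single latch rather
 * than a nesting counter.
 */
static void rb_example_pause_recording(struct ring_buffer *buffer)
{
        ring_buffer_record_off(buffer);

        if (!ring_buffer_record_is_on(buffer))
                pr_debug("ring buffer writes are now rejected\n");

        /* ... inspect or export the buffer contents here ... */

        ring_buffer_record_on(buffer);
}
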
2877/**
2878 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
2879 * @buffer: The ring buffer to stop writes to.
2880 * @cpu: The CPU buffer to stop
2881 *
2882 * This prevents all writes to the buffer. Any attempt to write
2883 * to the buffer after this will fail and return NULL.
2884 *
2885 * The caller should call synchronize_sched() after this.
2886 */
2887void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
2888{
2889        struct ring_buffer_per_cpu *cpu_buffer;
2890
2891        if (!cpumask_test_cpu(cpu, buffer->cpumask))
2892                return;
2893
2894        cpu_buffer = buffer->buffers[cpu];
2895        atomic_inc(&cpu_buffer->record_disabled);
2896}
2897EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
2898
2899/**
2900 * ring_buffer_record_enable_cpu - enable writes to the buffer
2901 * @buffer: The ring buffer to enable writes
2902 * @cpu: The CPU to enable.
2903 *
2904 * Note, multiple disables will need the same number of enables
2905 * to truly enable the writing (much like preempt_disable).
2906 */
2907void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
2908{
2909        struct ring_buffer_per_cpu *cpu_buffer;
2910
2911        if (!cpumask_test_cpu(cpu, buffer->cpumask))
2912                return;
2913
2914        cpu_buffer = buffer->buffers[cpu];
2915        atomic_dec(&cpu_buffer->record_disabled);
2916}
2917EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
2918
2919/*
2920 * The total number of entries in the ring buffer is the running counter
2921 * of entries entered into the ring buffer, minus the sum of
2922 * the entries read from the ring buffer and the number of
2923 * entries that were overwritten.
2924 */
2925static inline unsigned long
2926rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
2927{
2928        return local_read(&cpu_buffer->entries) -
2929                (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
2930}
2931
2932/**
2933 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
2934 * @buffer: The ring buffer
2935 * @cpu: The per CPU buffer to read from.
2936 */
2937u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
2938{
2939        unsigned long flags;
2940        struct ring_buffer_per_cpu *cpu_buffer;
2941        struct buffer_page *bpage;
2942        u64 ret = 0;
2943
2944        if (!cpumask_test_cpu(cpu, buffer->cpumask))
2945                return 0;
2946
2947        cpu_buffer = buffer->buffers[cpu];
2948        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2949        /*
2950         * if the tail is on the reader_page, the oldest time stamp is on the reader
2951         * page
2952         */
2953        if (cpu_buffer->tail_page == cpu_buffer->reader_page)
2954                bpage = cpu_buffer->reader_page;
2955        else
2956                bpage = rb_set_head_page(cpu_buffer);
2957        if (bpage)
2958                ret = bpage->page->time_stamp;
2959        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2960
2961        return ret;
2962}
2963EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
2964
2965/**
2966 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
2967 * @buffer: The ring buffer
2968 * @cpu: The per CPU buffer to read from.
2969 */
2970unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
2971{
2972        struct ring_buffer_per_cpu *cpu_buffer;
2973        unsigned long ret;
2974
2975        if (!cpumask_test_cpu(cpu, buffer->cpumask))
2976                return 0;
2977
2978        cpu_buffer = buffer->buffers[cpu];
2979        ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
2980
2981        return ret;
2982}
2983EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
2984
2985/**
2986 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
2987 * @buffer: The ring buffer
2988 * @cpu: The per CPU buffer to get the entries from.
2989 */
2990unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
2991{
2992        struct ring_buffer_per_cpu *cpu_buffer;
2993
2994        if (!cpumask_test_cpu(cpu, buffer->cpumask))
2995                return 0;
2996
2997        cpu_buffer = buffer->buffers[cpu];
2998
2999        return rb_num_of_entries(cpu_buffer);
3000}
3001EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
3002
3003/**
3004 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
3005 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
3006 * @buffer: The ring buffer
3007 * @cpu: The per CPU buffer to get the number of overruns from
3008 */
3009unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
3010{
3011        struct ring_buffer_per_cpu *cpu_buffer;
3012        unsigned long ret;
3013
3014        if (!cpumask_test_cpu(cpu, buffer->cpumask))
3015                return 0;
3016
3017        cpu_buffer = buffer->buffers[cpu];
3018        ret = local_read(&cpu_buffer->overrun);
3019
3020        return ret;
3021}
3022EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3023
3024/**
3025 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
3026 * commits failing due to the buffer wrapping around while there are uncommitted
3027 * events, such as during an interrupt storm.
3028 * @buffer: The ring buffer
3029 * @cpu: The per CPU buffer to get the number of overruns from
3030 */
3031unsigned long
3032ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3033{
3034        struct ring_buffer_per_cpu *cpu_buffer;
3035        unsigned long ret;
3036
3037        if (!cpumask_test_cpu(cpu, buffer->cpumask))
3038                return 0;
3039
3040        cpu_buffer = buffer->buffers[cpu];
3041        ret = local_read(&cpu_buffer->commit_overrun);
3042
3043        return ret;
3044}
3045EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3046
3047/**
3048 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
3049 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
3050 * @buffer: The ring buffer
3051 * @cpu: The per CPU buffer to get the number of dropped events from
3052 */
3053unsigned long
3054ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
3055{
3056        struct ring_buffer_per_cpu *cpu_buffer;
3057        unsigned long ret;
3058
3059        if (!cpumask_test_cpu(cpu, buffer->cpumask))
3060                return 0;
3061
3062        cpu_buffer = buffer->buffers[cpu];
3063        ret = local_read(&cpu_buffer->dropped_events);
3064
3065        return ret;
3066}
3067EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3068
3069/**
3070 * ring_buffer_entries - get the number of entries in a buffer
3071 * @buffer: The ring buffer
3072 *
3073 * Returns the total number of entries in the ring buffer
3074 * (all CPU entries)
3075 */
3076unsigned long ring_buffer_entries(struct ring_buffer *buffer)
3077{
3078        struct ring_buffer_per_cpu *cpu_buffer;
3079        unsigned long entries = 0;
3080        int cpu;
3081
3082        /* if you care about this being correct, lock the buffer */
3083        for_each_buffer_cpu(buffer, cpu) {
3084                cpu_buffer = buffer->buffers[cpu];
3085                entries += rb_num_of_entries(cpu_buffer);
3086        }
3087
3088        return entries;
3089}
3090EXPORT_SYMBOL_GPL(ring_buffer_entries);
3091
3092/**
3093 * ring_buffer_overruns - get the number of overruns in buffer
3094 * @buffer: The ring buffer
3095 *
3096 * Returns the total number of overruns in the ring buffer
3097 * (all CPU entries)
3098 */
3099unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
3100{
3101        struct ring_buffer_per_cpu *cpu_buffer;
3102        unsigned long overruns = 0;
3103        int cpu;
3104
3105        /* if you care about this being correct, lock the buffer */
3106        for_each_buffer_cpu(buffer, cpu) {
3107                cpu_buffer = buffer->buffers[cpu];
3108                overruns += local_read(&cpu_buffer->overrun);
3109        }
3110
3111        return overruns;
3112}
3113EXPORT_SYMBOL_GPL(ring_buffer_overruns);
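
    /*
     * Example: reporting the per CPU statistics exported above.  This is
     * only a minimal sketch; the use of for_each_online_cpu() and the
     * pr_info() reporting are illustrative assumptions, not something the
     * ring buffer requires.
     *
     *      int cpu;
     *
     *      for_each_online_cpu(cpu)
     *              pr_info("cpu %d: %lu entries, %lu overruns, %lu dropped\n",
     *                      cpu,
     *                      ring_buffer_entries_cpu(buffer, cpu),
     *                      ring_buffer_overrun_cpu(buffer, cpu),
     *                      ring_buffer_dropped_events_cpu(buffer, cpu));
     */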
3114
3115static void rb_iter_reset(struct ring_buffer_iter *iter)
3116{
3117        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3118
3119        /* Iterator usage is expected to have record disabled */
3120        if (list_empty(&cpu_buffer->reader_page->list)) {
3121                iter->head_page = rb_set_head_page(cpu_buffer);
3122                if (unlikely(!iter->head_page))
3123                        return;
3124                iter->head = iter->head_page->read;
3125        } else {
3126                iter->head_page = cpu_buffer->reader_page;
3127                iter->head = cpu_buffer->reader_page->read;
3128        }
3129        if (iter->head)
3130                iter->read_stamp = cpu_buffer->read_stamp;
3131        else
3132                iter->read_stamp = iter->head_page->page->time_stamp;
3133        iter->cache_reader_page = cpu_buffer->reader_page;
3134        iter->cache_read = cpu_buffer->read;
3135}
3136
3137/**
3138 * ring_buffer_iter_reset - reset an iterator
3139 * @iter: The iterator to reset
3140 *
3141 * Resets the iterator, so that it will start from the beginning
3142 * again.
3143 */
3144void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3145{
3146        struct ring_buffer_per_cpu *cpu_buffer;
3147        unsigned long flags;
3148
3149        if (!iter)
3150                return;
3151
3152        cpu_buffer = iter->cpu_buffer;
3153
3154        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3155        rb_iter_reset(iter);
3156        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3157}
3158EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3159
3160/**
3161 * ring_buffer_iter_empty - check if an iterator has no more to read
3162 * @iter: The iterator to check
3163 */
3164int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3165{
3166        struct ring_buffer_per_cpu *cpu_buffer;
3167
3168        cpu_buffer = iter->cpu_buffer;
3169
3170        return iter->head_page == cpu_buffer->commit_page &&
3171                iter->head == rb_commit_index(cpu_buffer);
3172}
3173EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
3174
3175static void
3176rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3177                     struct ring_buffer_event *event)
3178{
3179        u64 delta;
3180
3181        switch (event->type_len) {
3182        case RINGBUF_TYPE_PADDING:
3183                return;
3184
3185        case RINGBUF_TYPE_TIME_EXTEND:
3186                delta = event->array[0];
3187                delta <<= TS_SHIFT;
3188                delta += event->time_delta;
3189                cpu_buffer->read_stamp += delta;
3190                return;
3191
3192        case RINGBUF_TYPE_TIME_STAMP:
3193                /* FIXME: not implemented */
3194                return;
3195
3196        case RINGBUF_TYPE_DATA:
3197                cpu_buffer->read_stamp += event->time_delta;
3198                return;
3199
3200        default:
3201                BUG();
3202        }
3203        return;
3204}
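
    /*
     * A time extend event carries the upper bits of a delta that does not
     * fit in the time_delta field of a normal event (TS_SHIFT is 27 in
     * this file).  A worked example of the reconstruction above; the
     * concrete numbers are illustrative only:
     *
     *      event->array[0]   = 0x2
     *      event->time_delta = 0x5
     *      delta = (0x2 << 27) + 0x5 = 0x10000005
     *
     * read_stamp (or the iterator's read_stamp below) then advances by the
     * full delta rather than by the 27-bit field alone.
     */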
3205
3206static void
3207rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
3208                          struct ring_buffer_event *event)
3209{
3210        u64 delta;
3211
3212        switch (event->type_len) {
3213        case RINGBUF_TYPE_PADDING:
3214                return;
3215
3216        case RINGBUF_TYPE_TIME_EXTEND:
3217                delta = event->array[0];
3218                delta <<= TS_SHIFT;
3219                delta += event->time_delta;
3220                iter->read_stamp += delta;
3221                return;
3222
3223        case RINGBUF_TYPE_TIME_STAMP:
3224                /* FIXME: not implemented */
3225                return;
3226
3227        case RINGBUF_TYPE_DATA:
3228                iter->read_stamp += event->time_delta;
3229                return;
3230
3231        default:
3232                BUG();
3233        }
3234        return;
3235}
3236
3237static struct buffer_page *
3238rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3239{
3240        struct buffer_page *reader = NULL;
3241        unsigned long overwrite;
3242        unsigned long flags;
3243        int nr_loops = 0;
3244        int ret;
3245
3246        local_irq_save(flags);
3247        arch_spin_lock(&cpu_buffer->lock);
3248
3249 again:
3250        /*
3251         * This should normally only loop twice. But because the
3252         * start of the reader inserts an empty page, it causes
3253         * a case where we will loop three times. There should be no
3254         * reason to loop four times (that I know of).
3255         */
3256        if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3257                reader = NULL;
3258                goto out;
3259        }
3260
3261        reader = cpu_buffer->reader_page;
3262
3263        /* If there's more to read, return this page */
3264        if (cpu_buffer->reader_page->read < rb_page_size(reader))
3265                goto out;
3266
3267        /* Never should we have an index greater than the size */
3268        if (RB_WARN_ON(cpu_buffer,
3269                       cpu_buffer->reader_page->read > rb_page_size(reader)))
3270                goto out;
3271
3272        /* check if we caught up to the tail */
3273        reader = NULL;
3274        if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3275                goto out;
3276
3277        /* Don't bother swapping if the ring buffer is empty */
3278        if (rb_num_of_entries(cpu_buffer) == 0)
3279                goto out;
3280
3281        /*
3282         * Reset the reader page to size zero.
3283         */
3284        local_set(&cpu_buffer->reader_page->write, 0);
3285        local_set(&cpu_buffer->reader_page->entries, 0);
3286        local_set(&cpu_buffer->reader_page->page->commit, 0);
3287        cpu_buffer->reader_page->real_end = 0;
3288
3289 spin:
3290        /*
3291         * Splice the empty reader page into the list around the head.
3292         */
3293        reader = rb_set_head_page(cpu_buffer);
3294        if (!reader)
3295                goto out;
3296        cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3297        cpu_buffer->reader_page->list.prev = reader->list.prev;
3298
3299        /*
3300         * cpu_buffer->pages just needs to point to the buffer; it
3301         * has no specific buffer page to point to. Let's move it out
3302         * of our way so we don't accidentally swap it.
3303         */
3304        cpu_buffer->pages = reader->list.prev;
3305
3306        /* The reader page will be pointing to the new head */
3307        rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3308
3309        /*
3310         * We want to make sure we read the overruns after we set up our
3311         * pointers to the next object. The writer side does a
3312         * cmpxchg to cross pages which acts as the mb on the writer
3313         * side. Note, the reader will constantly fail the swap
3314         * while the writer is updating the pointers, so this
3315         * guarantees that the overwrite recorded here is the one we
3316         * want to compare with the last_overrun.
3317         */
3318        smp_mb();
3319        overwrite = local_read(&(cpu_buffer->overrun));
3320
3321        /*
3322         * Here's the tricky part.
3323         *
3324         * We need to move the pointer past the header page.
3325         * But we can only do that if a writer is not currently
3326         * moving it. The page before the header page has the
3327         * flag bit '1' set if it is pointing to the page we want,
3328         * but if the writer is in the process of moving it
3329         * then it will be '2', or '0' if it has already moved.
3330         */
3331
3332        ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
3333
3334        /*
3335         * If we did not convert it, then we must try again.
3336         */
3337        if (!ret)
3338                goto spin;
3339
3340        /*
3341         * Yeah! We succeeded in replacing the page.
3342         *
3343         * Now make the new head point back to the reader page.
3344         */
3345        rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
3346        rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3347
3348        /* Finally update the reader page to the new head */
3349        cpu_buffer->reader_page = reader;
3350        rb_reset_reader_page(cpu_buffer);
3351
3352        if (overwrite != cpu_buffer->last_overrun) {
3353                cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3354                cpu_buffer->last_overrun = overwrite;
3355        }
3356
3357        goto again;
3358
3359 out:
3360        arch_spin_unlock(&cpu_buffer->lock);
3361        local_irq_restore(flags);
3362
3363        return reader;
3364}
3365
3366static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3367{
3368        struct ring_buffer_event *event;
3369        struct buffer_page *reader;
3370        unsigned length;
3371
3372        reader = rb_get_reader_page(cpu_buffer);
3373
3374        /* This function should not be called when buffer is empty */
3375        if (RB_WARN_ON(cpu_buffer, !reader))
3376                return;
3377
3378        event = rb_reader_event(cpu_buffer);
3379
3380        if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3381                cpu_buffer->read++;
3382
3383        rb_update_read_stamp(cpu_buffer, event);
3384
3385        length = rb_event_length(event);
3386        cpu_buffer->reader_page->read += length;
3387}
3388
3389static void rb_advance_iter(struct ring_buffer_iter *iter)
3390{
3391        struct ring_buffer_per_cpu *cpu_buffer;
3392        struct ring_buffer_event *event;
3393        unsigned length;
3394
3395        cpu_buffer = iter->cpu_buffer;
3396
3397        /*
3398         * Check if we are at the end of the buffer.
3399         */
3400        if (iter->head >= rb_page_size(iter->head_page)) {
3401                /* discarded commits can make the page empty */
3402                if (iter->head_page == cpu_buffer->commit_page)
3403                        return;
3404                rb_inc_iter(iter);
3405                return;
3406        }
3407
3408        event = rb_iter_head_event(iter);
3409
3410        length = rb_event_length(event);
3411
3412        /*
3413         * This should not be called to advance the header if we are
3414         * at the tail of the buffer.
3415         */
3416        if (RB_WARN_ON(cpu_buffer,
3417                       (iter->head_page == cpu_buffer->commit_page) &&
3418                       (iter->head + length > rb_commit_index(cpu_buffer))))
3419                return;
3420
3421        rb_update_iter_read_stamp(iter, event);
3422
3423        iter->head += length;
3424
3425        /* check for end of page padding */
3426        if ((iter->head >= rb_page_size(iter->head_page)) &&
3427            (iter->head_page != cpu_buffer->commit_page))
3428                rb_advance_iter(iter);
3429}
3430
3431static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3432{
3433        return cpu_buffer->lost_events;
3434}
3435
3436static struct ring_buffer_event *
3437rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3438               unsigned long *lost_events)
3439{
3440        struct ring_buffer_event *event;
3441        struct buffer_page *reader;
3442        int nr_loops = 0;
3443
3444 again:
3445        /*
3446         * We repeat when a time extend is encountered.
3447         * Since the time extend is always attached to a data event,
3448         * we should never loop more than once.
3449         * (We never hit the following condition more than twice).
3450         */
3451        if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3452                return NULL;
3453
3454        reader = rb_get_reader_page(cpu_buffer);
3455        if (!reader)
3456                return NULL;
3457
3458        event = rb_reader_event(cpu_buffer);
3459
3460        switch (event->type_len) {
3461        case RINGBUF_TYPE_PADDING:
3462                if (rb_null_event(event))
3463                        RB_WARN_ON(cpu_buffer, 1);
3464                /*
3465                 * Because the writer could be discarding every
3466                 * event it creates (which would probably be bad),
3467                 * if we were to go back to "again" then we may never
3468                 * catch up, and will trigger the warn on, or lock
3469                 * the box. Return the padding, and we will release
3470                 * the current locks, and try again.
3471                 */
3472                return event;
3473
3474        case RINGBUF_TYPE_TIME_EXTEND:
3475                /* Internal data, OK to advance */
3476                rb_advance_reader(cpu_buffer);
3477                goto again;
3478
3479        case RINGBUF_TYPE_TIME_STAMP:
3480                /* FIXME: not implemented */
3481                rb_advance_reader(cpu_buffer);
3482                goto again;
3483
3484        case RINGBUF_TYPE_DATA:
3485                if (ts) {
3486                        *ts = cpu_buffer->read_stamp + event->time_delta;
3487                        ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3488                                                         cpu_buffer->cpu, ts);
3489                }
3490                if (lost_events)
3491                        *lost_events = rb_lost_events(cpu_buffer);
3492                return event;
3493
3494        default:
3495                BUG();
3496        }
3497
3498        return NULL;
3499}
3500EXPORT_SYMBOL_GPL(ring_buffer_peek);
3501
3502static struct ring_buffer_event *
3503rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3504{
3505        struct ring_buffer *buffer;
3506        struct ring_buffer_per_cpu *cpu_buffer;
3507        struct ring_buffer_event *event;
3508        int nr_loops = 0;
3509
3510        cpu_buffer = iter->cpu_buffer;
3511        buffer = cpu_buffer->buffer;
3512
3513        /*
3514         * Check if someone performed a consuming read to
3515         * the buffer. A consuming read invalidates the iterator
3516         * and we need to reset the iterator in this case.
3517         */
3518        if (unlikely(iter->cache_read != cpu_buffer->read ||
3519                     iter->cache_reader_page != cpu_buffer->reader_page))
3520                rb_iter_reset(iter);
3521
3522 again:
3523        if (ring_buffer_iter_empty(iter))
3524                return NULL;
3525
3526        /*
3527         * We repeat when a time extend is encountered.
3528         * Since the time extend is always attached to a data event,
3529         * we should never loop more than once.
3530         * (We never hit the following condition more than twice).
3531         */
3532        if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3533                return NULL;
3534
3535        if (rb_per_cpu_empty(cpu_buffer))
3536                return NULL;
3537
3538        if (iter->head >= local_read(&iter->head_page->page->commit)) {
3539                rb_inc_iter(iter);
3540                goto again;
3541        }
3542
3543        event = rb_iter_head_event(iter);
3544
3545        switch (event->type_len) {
3546        case RINGBUF_TYPE_PADDING:
3547                if (rb_null_event(event)) {
3548                        rb_inc_iter(iter);
3549                        goto again;
3550                }
3551                rb_advance_iter(iter);
3552                return event;
3553
3554        case RINGBUF_TYPE_TIME_EXTEND:
3555                /* Internal data, OK to advance */
3556                rb_advance_iter(iter);
3557                goto again;
3558
3559        case RINGBUF_TYPE_TIME_STAMP:
3560                /* FIXME: not implemented */
3561                rb_advance_iter(iter);
3562                goto again;
3563
3564        case RINGBUF_TYPE_DATA:
3565                if (ts) {
3566                        *ts = iter->read_stamp + event->time_delta;
3567                        ring_buffer_normalize_time_stamp(buffer,
3568                                                         cpu_buffer->cpu, ts);
3569                }
3570                return event;
3571
3572        default:
3573                BUG();
3574        }
3575
3576        return NULL;
3577}
3578EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3579
3580static inline int rb_ok_to_lock(void)
3581{
3582        /*
3583         * If an NMI die dump is reading out the content of the ring
3584         * buffer, do not grab locks. We also permanently disable the
3585         * ring buffer. A one-time deal is all you get from reading
3586         * the ring buffer from an NMI.
3587         */
3588        if (likely(!in_nmi()))
3589                return 1;
3590
3591        tracing_off_permanent();
3592        return 0;
3593}
3594
3595/**
3596 * ring_buffer_peek - peek at the next event to be read
3597 * @buffer: The ring buffer to read
3598 * @cpu: The cpu to peek at
3599 * @ts: The timestamp counter of this event.
3600 * @lost_events: a variable to store if events were lost (may be NULL)
3601 *
3602 * This will return the event that will be read next, but does
3603 * not consume the data.
3604 */
3605struct ring_buffer_event *
3606ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
3607                 unsigned long *lost_events)
3608{
3609        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3610        struct ring_buffer_event *event;
3611        unsigned long flags;
3612        int dolock;
3613
3614        if (!cpumask_test_cpu(cpu, buffer->cpumask))
3615                return NULL;
3616
3617        dolock = rb_ok_to_lock();
3618 again:
3619        local_irq_save(flags);
3620        if (dolock)
3621                raw_spin_lock(&cpu_buffer->reader_lock);
3622        event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3623        if (event && event->type_len == RINGBUF_TYPE_PADDING)
3624                rb_advance_reader(cpu_buffer);
3625        if (dolock)
3626                raw_spin_unlock(&cpu_buffer->reader_lock);
3627        local_irq_restore(flags);
3628
3629        if (event && event->type_len == RINGBUF_TYPE_PADDING)
3630                goto again;
3631
3632        return event;
3633}
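
    /*
     * Example use of ring_buffer_peek(): inspect the next event on a CPU
     * without consuming it.  A minimal sketch; what is done with the
     * returned event is an illustrative assumption.
     *
     *      struct ring_buffer_event *event;
     *      unsigned long lost;
     *      u64 ts;
     *
     *      event = ring_buffer_peek(buffer, cpu, &ts, &lost);
     *      if (event)
     *              pr_info("next event: %u bytes at %llu (%lu lost)\n",
     *                      ring_buffer_event_length(event), ts, lost);
     */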
3634
3635/**
3636 * ring_buffer_iter_peek - peek at the next event to be read
3637 * @iter: The ring buffer iterator
3638 * @ts: The timestamp counter of this event.
3639 *
3640 * This will return the event that will be read next, but does
3641 * not increment the iterator.
3642 */
3643struct ring_buffer_event *
3644ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3645{
3646        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3647        struct ring_buffer_event *event;
3648        unsigned long flags;
3649
3650 again:
3651        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3652        event = rb_iter_peek(iter, ts);
3653        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3654
3655        if (event && event->type_len == RINGBUF_TYPE_PADDING)
3656                goto again;
3657
3658        return event;
3659}
3660
3661/**
3662 * ring_buffer_consume - return an event and consume it
3663 * @buffer: The ring buffer to get the next event from
3664 * @cpu: the cpu to read the buffer from
3665 * @ts: a variable to store the timestamp (may be NULL)
3666 * @lost_events: a variable to store if events were lost (may be NULL)
3667 *
3668 * Returns the next event in the ring buffer, and that event is consumed.
3669 * This means that sequential reads will keep returning a different event,
3670 * and will eventually empty the ring buffer if the producer is slower.
3671 */
3672struct ring_buffer_event *
3673ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
3674                    unsigned long *lost_events)
3675{
3676        struct ring_buffer_per_cpu *cpu_buffer;
3677        struct ring_buffer_event *event = NULL;
3678        unsigned long flags;
3679        int dolock;
3680
3681        dolock = rb_ok_to_lock();
3682
3683 again:
3684        /* might be called in atomic */
3685        preempt_disable();
3686
3687        if (!cpumask_test_cpu(cpu, buffer->cpumask))
3688                goto out;
3689
3690        cpu_buffer = buffer->buffers[cpu];
3691        local_irq_save(flags);
3692        if (dolock)
3693                raw_spin_lock(&cpu_buffer->reader_lock);
3694
3695        event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3696        if (event) {
3697                cpu_buffer->lost_events = 0;
3698                rb_advance_reader(cpu_buffer);
3699        }
3700
3701        if (dolock)
3702                raw_spin_unlock(&cpu_buffer->reader_lock);
3703        local_irq_restore(flags);
3704
3705 out:
3706        preempt_enable();
3707
3708        if (event && event->type_len == RINGBUF_TYPE_PADDING)
3709                goto again;
3710
3711        return event;
3712}
3713EXPORT_SYMBOL_GPL(ring_buffer_consume);
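
    /*
     * Example: draining a CPU buffer with consuming reads.  A minimal
     * sketch; simply counting the events is an illustrative assumption.
     *
     *      struct ring_buffer_event *event;
     *      unsigned long lost;
     *      unsigned long count = 0;
     *      u64 ts;
     *
     *      while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
     *              count++;
     */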
3714
3715/**
3716 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
3717 * @buffer: The ring buffer to read from
3718 * @cpu: The cpu buffer to iterate over
3719 *
3720 * This performs the initial preparations necessary to iterate
3721 * through the buffer.  Memory is allocated, buffer recording
3722 * is disabled, and the iterator pointer is returned to the caller.
3723 *
3724 * Disabling buffer recording prevents the reading from being
3725 * corrupted. This is not a consuming read, so a producer is not
3726 * expected.
3727 *
3728 * After a sequence of ring_buffer_read_prepare calls, the user is
3729 * expected to make at least one call to ring_buffer_read_prepare_sync.
3730 * Afterwards, ring_buffer_read_start is invoked to get things going
3731 * for real.
3732 *
3733 * This overall must be paired with ring_buffer_read_finish.
3734 */
3735struct ring_buffer_iter *
3736ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
3737{
3738        struct ring_buffer_per_cpu *cpu_buffer;
3739        struct ring_buffer_iter *iter;
3740
3741        if (!cpumask_test_cpu(cpu, buffer->cpumask))
3742                return NULL;
3743
3744        iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3745        if (!iter)
3746                return NULL;
3747
3748        cpu_buffer = buffer->buffers[cpu];
3749
3750        iter->cpu_buffer = cpu_buffer;
3751
3752        atomic_inc(&buffer->resize_disabled);
3753        atomic_inc(&cpu_buffer->record_disabled);
3754
3755        return iter;
3756}
3757EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
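
    /*
     * The full non-consuming read sequence described above, as a minimal
     * sketch for a single CPU:
     *
     *      struct ring_buffer_iter *iter;
     *      struct ring_buffer_event *event;
     *      u64 ts;
     *
     *      iter = ring_buffer_read_prepare(buffer, cpu);
     *      if (!iter)
     *              return -ENOMEM;
     *      ring_buffer_read_prepare_sync();
     *      ring_buffer_read_start(iter);
     *      while ((event = ring_buffer_read(iter, &ts)))
     *              process_event(event, ts);
     *      ring_buffer_read_finish(iter);
     *
     * process_event() is a hypothetical callback, not part of this API.
     */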
3758
3759/**
3760 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
3761 *
3762 * All previously invoked ring_buffer_read_prepare calls to prepare
3763 * iterators will be synchronized.  Afterwards, ring_buffer_read_start
3764 * calls on those iterators are allowed.
3765 */
3766void
3767ring_buffer_read_prepare_sync(void)
3768{
3769        synchronize_sched();
3770}
3771EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
3772
3773/**
3774 * ring_buffer_read_start - start a non consuming read of the buffer
3775 * @iter: The iterator returned by ring_buffer_read_prepare
3776 *
3777 * This finalizes the startup of an iteration through the buffer.
3778 * The iterator comes from a call to ring_buffer_read_prepare and
3779 * an intervening ring_buffer_read_prepare_sync must have been
3780 * performed.
3781 *
3782 * Must be paired with ring_buffer_read_finish.
3783 */
3784void
3785ring_buffer_read_start(struct ring_buffer_iter *iter)
3786{
3787        struct ring_buffer_per_cpu *cpu_buffer;
3788        unsigned long flags;
3789
3790        if (!iter)
3791                return;
3792
3793        cpu_buffer = iter->cpu_buffer;
3794
3795        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3796        arch_spin_lock(&cpu_buffer->lock);
3797        rb_iter_reset(iter);
3798        arch_spin_unlock(&cpu_buffer->lock);
3799        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3800}
3801EXPORT_SYMBOL_GPL(ring_buffer_read_start);
3802
3803/**
3804 * ring_buffer_read_finish - finish reading the iterator of the buffer
3805 * @iter: The iterator retrieved by ring_buffer_read_prepare
3806 *
3807 * This re-enables the recording to the buffer, and frees the
3808 * iterator.
3809 */
3810void
3811ring_buffer_read_finish(struct ring_buffer_iter *iter)
3812{
3813        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3814        unsigned long flags;
3815
3816        /*
3817         * Ring buffer is disabled from recording, here's a good place
3818         * to check the integrity of the ring buffer.
3819         * Must prevent readers from trying to read, as the check
3820         * clears the HEAD page and readers require it.
3821         */
3822        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3823        rb_check_pages(cpu_buffer);
3824        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3825
3826        atomic_dec(&cpu_buffer->record_disabled);
3827        atomic_dec(&cpu_buffer->buffer->resize_disabled);
3828        kfree(iter);
3829}
3830EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
3831
3832/**
3833 * ring_buffer_read - read the next item in the ring buffer by the iterator
3834 * @iter: The ring buffer iterator
3835 * @ts: The time stamp of the event read.
3836 *
3837 * This reads the next event in the ring buffer and increments the iterator.
3838 */
3839struct ring_buffer_event *
3840ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
3841{
3842        struct ring_buffer_event *event;
3843        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3844        unsigned long flags;
3845
3846        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3847 again:
3848        event = rb_iter_peek(iter, ts);
3849        if (!event)
3850                goto out;
3851
3852        if (event->type_len == RINGBUF_TYPE_PADDING)
3853                goto again;
3854
3855        rb_advance_iter(iter);
3856 out:
3857        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3858
3859        return event;
3860}
3861EXPORT_SYMBOL_GPL(ring_buffer_read);
3862
3863/**
3864 * ring_buffer_size - return the size of the ring buffer (in bytes)
3865 * @buffer: The ring buffer.
     * @cpu: The per CPU buffer to get the size of.
3866 */
3867unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
3868{
3869        /*
3870         * Earlier, this method returned
3871         *      BUF_PAGE_SIZE * buffer->nr_pages
3872         * Since the nr_pages field is now removed, we have converted this to
3873         * return the per cpu buffer value.
3874         */
3875        if (!cpumask_test_cpu(cpu, buffer->cpumask))
3876                return 0;
3877
3878        return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
3879}
3880EXPORT_SYMBOL_GPL(ring_buffer_size);
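
    /*
     * Since the size is now reported per CPU, a caller that wants a global
     * figure can sum over the CPUs itself.  A minimal sketch, assuming the
     * caller only cares about currently online CPUs:
     *
     *      unsigned long total = 0;
     *      int cpu;
     *
     *      for_each_online_cpu(cpu)
     *              total += ring_buffer_size(buffer, cpu);
     */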
3881
3882static void
3883rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
3884{
3885        rb_head_page_deactivate(cpu_buffer);
3886
3887        cpu_buffer->head_page
3888                = list_entry(cpu_buffer->pages, struct buffer_page, list);
3889        local_set(&cpu_buffer->head_page->write, 0);
3890        local_set(&cpu_buffer->head_page->entries, 0);
3891        local_set(&cpu_buffer->head_page->page->commit, 0);
3892
3893        cpu_buffer->head_page->read = 0;
3894
3895        cpu_buffer->tail_page = cpu_buffer->head_page;
3896        cpu_buffer->commit_page = cpu_buffer->head_page;
3897
3898        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
3899        INIT_LIST_HEAD(&cpu_buffer->new_pages);
3900        local_set(&cpu_buffer->reader_page->write, 0);
3901        local_set(&cpu_buffer->reader_page->entries, 0);
3902        local_set(&cpu_buffer->reader_page->page->commit, 0);
3903        cpu_buffer->reader_page->read = 0;
3904
3905        local_set(&cpu_buffer->entries_bytes, 0);
3906        local_set(&cpu_buffer->overrun, 0);
3907        local_set(&cpu_buffer->commit_overrun, 0);
3908        local_set(&cpu_buffer->dropped_events, 0);
3909        local_set(&cpu_buffer->entries, 0);
3910        local_set(&cpu_buffer->committing, 0);
3911        local_set(&cpu_buffer->commits, 0);
3912        cpu_buffer->read = 0;
3913        cpu_buffer->read_bytes = 0;
3914
3915        cpu_buffer->write_stamp = 0;
3916        cpu_buffer->read_stamp = 0;
3917
3918        cpu_buffer->lost_events = 0;
3919        cpu_buffer->last_overrun = 0;
3920
3921        rb_head_page_activate(cpu_buffer);
3922}
3923
3924/**
3925 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
3926 * @buffer: The ring buffer to reset a per cpu buffer of
3927 * @cpu: The CPU buffer to be reset
3928 */
3929void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
3930{
3931        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3932        unsigned long flags;
3933
3934        if (!cpumask_test_cpu(cpu, buffer->cpumask))
3935                return;
3936
3937        atomic_inc(&buffer->resize_disabled);
3938        atomic_inc(&cpu_buffer->record_disabled);
3939
3940        /* Make sure all commits have finished */
3941        synchronize_sched();
3942
3943        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3944
3945        if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
3946                goto out;
3947
3948        arch_spin_lock(&cpu_buffer->lock);
3949
3950        rb_reset_cpu(cpu_buffer);
3951
3952        arch_spin_unlock(&cpu_buffer->lock);
3953
3954 out:
3955        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3956
3957        atomic_dec(&cpu_buffer->record_disabled);
3958        atomic_dec(&buffer->resize_disabled);
3959}
3960EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
3961
3962/**
3963 * ring_buffer_reset - reset a ring buffer
3964 * @buffer: The ring buffer to reset all cpu buffers
3965 */
3966void ring_buffer_reset(struct ring_buffer *buffer)
3967{
3968        int cpu;
3969
3970        for_each_buffer_cpu(buffer, cpu)
3971                ring_buffer_reset_cpu(buffer, cpu);
3972}
3973EXPORT_SYMBOL_GPL(ring_buffer_reset);
3974
3975/**
3976 * ring_buffer_empty - is the ring buffer empty?
3977 * @buffer: The ring buffer to test
3978 */
3979int ring_buffer_empty(struct ring_buffer *buffer)
3980{
3981        struct ring_buffer_per_cpu *cpu_buffer;
3982        unsigned long flags;
3983        int dolock;
3984        int cpu;
3985        int ret;
3986
3987        dolock = rb_ok_to_lock();
3988
3989        /* yes this is racy, but if you don't like the race, lock the buffer */
3990        for_each_buffer_cpu(buffer, cpu) {
3991                cpu_buffer = buffer->buffers[cpu];
3992                local_irq_save(flags);
3993                if (dolock)
3994                        raw_spin_lock(&cpu_buffer->reader_lock);
3995                ret = rb_per_cpu_empty(cpu_buffer);
3996                if (dolock)
3997                        raw_spin_unlock(&cpu_buffer->reader_lock);
3998                local_irq_restore(flags);
3999
4000                if (!ret)
4001                        return 0;
4002        }
4003
4004        return 1;
4005}
4006EXPORT_SYMBOL_GPL(ring_buffer_empty);
4007
4008/**
4009 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
4010 * @buffer: The ring buffer
4011 * @cpu: The CPU buffer to test
4012 */
4013int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
4014{
4015        struct ring_buffer_per_cpu *cpu_buffer;
4016        unsigned long flags;
4017        int dolock;
4018        int ret;
4019
4020        if (!cpumask_test_cpu(cpu, buffer->cpumask))
4021                return 1;
4022
4023        dolock = rb_ok_to_lock();
4024
4025        cpu_buffer = buffer->buffers[cpu];
4026        local_irq_save(flags);
4027        if (dolock)
4028                raw_spin_lock(&cpu_buffer->reader_lock);
4029        ret = rb_per_cpu_empty(cpu_buffer);
4030        if (dolock)
4031                raw_spin_unlock(&cpu_buffer->reader_lock);
4032        local_irq_restore(flags);
4033
4034        return ret;
4035}
4036EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
4037
4038#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4039/**
4040 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
4041 * @buffer_a: One buffer to swap with
4042 * @buffer_b: The other buffer to swap with
     * @cpu: the CPU buffer to swap
4043 *
4044 * This function is useful for tracers that want to take a "snapshot"
4045 * of a CPU buffer and have another backup buffer lying around.
4046 * It is expected that the tracer handles the cpu buffer not being
4047 * used at the moment.
4048 */
4049int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
4050                         struct ring_buffer *buffer_b, int cpu)
4051{
4052        struct ring_buffer_per_cpu *cpu_buffer_a;
4053        struct ring_buffer_per_cpu *cpu_buffer_b;
4054        int ret = -EINVAL;
4055
4056        if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
4057            !cpumask_test_cpu(cpu, buffer_b->cpumask))
4058                goto out;
4059
4060        cpu_buffer_a = buffer_a->buffers[cpu];
4061        cpu_buffer_b = buffer_b->buffers[cpu];
4062
4063        /* At least make sure the two buffers are somewhat the same */
4064        if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
4065                goto out;
4066
4067        ret = -EAGAIN;
4068
4069        if (ring_buffer_flags != RB_BUFFERS_ON)
4070                goto out;
4071
4072        if (atomic_read(&buffer_a->record_disabled))
4073                goto out;
4074
4075        if (atomic_read(&buffer_b->record_disabled))
4076                goto out;
4077
4078        if (atomic_read(&cpu_buffer_a->record_disabled))
4079                goto out;
4080
4081        if (atomic_read(&cpu_buffer_b->record_disabled))
4082                goto out;
4083
4084        /*
4085         * We can't do a synchronize_sched here because this
4086         * function can be called in atomic context.
4087         * Normally this will be called from the same CPU as cpu.
4088         * If not it's up to the caller to protect this.
4089         */
4090        atomic_inc(&cpu_buffer_a->record_disabled);
4091        atomic_inc(&cpu_buffer_b->record_disabled);
4092
4093        ret = -EBUSY;
4094        if (local_read(&cpu_buffer_a->committing))
4095                goto out_dec;
4096        if (local_read(&cpu_buffer_b->committing))
4097                goto out_dec;
4098
4099        buffer_a->buffers[cpu] = cpu_buffer_b;
4100        buffer_b->buffers[cpu] = cpu_buffer_a;
4101
4102        cpu_buffer_b->buffer = buffer_a;
4103        cpu_buffer_a->buffer = buffer_b;
4104
4105        ret = 0;
4106
4107out_dec:
4108        atomic_dec(&cpu_buffer_a->record_disabled);
4109        atomic_dec(&cpu_buffer_b->record_disabled);
4110out:
4111        return ret;
4112}
4113EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
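
    /*
     * Example: taking a "snapshot" of one CPU by swapping in a spare buffer
     * of the same size.  A minimal sketch; the live_buffer and max_buffer
     * names are illustrative assumptions.
     *
     *      int err;
     *
     *      err = ring_buffer_swap_cpu(live_buffer, max_buffer, cpu);
     *      if (!err) {
     *              ... the events that were on @cpu of live_buffer can now
     *                  be read from max_buffer while live_buffer keeps
     *                  recording ...
     *      }
     */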
4114#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
4115
4116/**
4117 * ring_buffer_alloc_read_page - allocate a page to read from buffer
4118 * @buffer: the buffer to allocate for.
     * @cpu: the cpu buffer to allocate the page for.
4119 *
4120 * This function is used in conjunction with ring_buffer_read_page.
4121 * When reading a full page from the ring buffer, these functions
4122 * can be used to speed up the process. The calling function should
4123 * allocate a few pages first with this function. Then when it
4124 * needs to get pages from the ring buffer, it passes the result
4125 * of this function into ring_buffer_read_page, which will swap
4126 * the page that was allocated, with the read page of the buffer.
4127 *
4128 * Returns:
4129 *  The page allocated, or NULL on error.
4130 */
4131void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
4132{
4133        struct buffer_data_page *bpage;
4134        struct page *page;
4135
4136        page = alloc_pages_node(cpu_to_node(cpu),
4137                                GFP_KERNEL | __GFP_NORETRY, 0);
4138        if (!page)
4139                return NULL;
4140
4141        bpage = page_address(page);
4142
4143        rb_init_page(bpage);
4144
4145        return bpage;
4146}
4147EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
4148
4149/**
4150 * ring_buffer_free_read_page - free an allocated read page
4151 * @buffer: the buffer the page was allocated for
4152 * @data: the page to free
4153 *
4154 * Free a page allocated from ring_buffer_alloc_read_page.
4155 */
4156void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
4157{
4158        free_page((unsigned long)data);
4159}
4160EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
4161
4162/**
4163 * ring_buffer_read_page - extract a page from the ring buffer
4164 * @buffer: buffer to extract from
4165 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
4166 * @len: amount to extract
4167 * @cpu: the cpu of the buffer to extract
4168 * @full: should the extraction only happen when the page is full.
4169 *
4170 * This function will pull out a page from the ring buffer and consume it.
4171 * @data_page must be the address of the variable that was returned
4172 * from ring_buffer_alloc_read_page. This is because the page might be used
4173 * to swap with a page in the ring buffer.
4174 *
4175 * for example:
4176 *      rpage = ring_buffer_alloc_read_page(buffer, cpu);
4177 *      if (!rpage)
4178 *              return error;
4179 *      ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
4180 *      if (ret >= 0)
4181 *              process_page(rpage, ret);
4182 *
4183 * When @full is set, the read will fail (return < 0) unless the
4184 * writer is completely off the reader page.
4185 *
4186 * Note: it is up to the calling functions to handle sleeps and wakeups.
4187 *  The ring buffer can be used anywhere in the kernel and can not
4188 *  blindly call wake_up. The layer that uses the ring buffer must be
4189 *  responsible for that.
4190 *
4191 * Returns:
4192 *  >=0 if data has been transferred, returns the offset of consumed data.
4193 *  <0 if no data has been transferred.
4194 */
4195int ring_buffer_read_page(struct ring_buffer *buffer,
4196                          void **data_page, size_t len, int cpu, int full)
4197{
4198        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4199        struct ring_buffer_event *event;
4200        struct buffer_data_page *bpage;
4201        struct buffer_page *reader;
4202        unsigned long missed_events;
4203        unsigned long flags;
4204        unsigned int commit;
4205        unsigned int read;
4206        u64 save_timestamp;
4207        int ret = -1;
4208
4209        if (!cpumask_test_cpu(cpu, buffer->cpumask))
4210                goto out;
4211
4212        /*
4213         * If len is not big enough to hold the page header, then
4214         * we can not copy anything.
4215         */
4216        if (len <= BUF_PAGE_HDR_SIZE)
4217                goto out;
4218
4219        len -= BUF_PAGE_HDR_SIZE;
4220
4221        if (!data_page)
4222                goto out;
4223
4224        bpage = *data_page;
4225        if (!bpage)
4226                goto out;
4227
4228        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4229
4230        reader = rb_get_reader_page(cpu_buffer);
4231        if (!reader)
4232                goto out_unlock;
4233
4234        event = rb_reader_event(cpu_buffer);
4235
4236        read = reader->read;
4237        commit = rb_page_commit(reader);
4238
4239        /* Check if any events were dropped */
4240        missed_events = cpu_buffer->lost_events;
4241
4242        /*
4243         * If this page has been partially read or
4244         * if len is not big enough to read the rest of the page or
4245         * a writer is still on the page, then
4246         * we must copy the data from the page to the buffer.
4247         * Otherwise, we can simply swap the page with the one passed in.
4248         */
4249        if (read || (len < (commit - read)) ||
4250            cpu_buffer->reader_page == cpu_buffer->commit_page) {
4251                struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
4252                unsigned int rpos = read;
4253                unsigned int pos = 0;
4254                unsigned int size;
4255
4256                if (full)
4257                        goto out_unlock;
4258
4259                if (len > (commit - read))
4260                        len = (commit - read);
4261
4262                /* Always keep the time extend and data together */
4263                size = rb_event_ts_length(event);
4264
4265                if (len < size)
4266                        goto out_unlock;
4267
4268                /* save the current timestamp, since the user will need it */
4269                save_timestamp = cpu_buffer->read_stamp;
4270
4271                /* Need to copy one event at a time */
4272                do {
4273                        /* We need the size of one event, because
4274                         * rb_advance_reader only advances by one event,
4275                         * whereas rb_event_ts_length may include the size of
4276                         * one or two events.
4277                         * We have already ensured there's enough space if this
4278                         * is a time extend. */
4279                        size = rb_event_length(event);
4280                        memcpy(bpage->data + pos, rpage->data + rpos, size);
4281
4282                        len -= size;
4283
4284                        rb_advance_reader(cpu_buffer);
4285                        rpos = reader->read;
4286                        pos += size;
4287
4288                        if (rpos >= commit)
4289                                break;
4290
4291                        event = rb_reader_event(cpu_buffer);
4292                        /* Always keep the time extend and data together */
4293                        size = rb_event_ts_length(event);
4294                } while (len >= size);
4295
4296                /* update bpage */
4297                local_set(&bpage->commit, pos);
4298                bpage->time_stamp = save_timestamp;
4299
4300                /* we copied everything to the beginning */
4301                read = 0;
4302        } else {
4303                /* update the entry counter */
4304                cpu_buffer->read += rb_page_entries(reader);
4305                cpu_buffer->read_bytes += BUF_PAGE_SIZE;
4306
4307                /* swap the pages */
4308                rb_init_page(bpage);
4309                bpage = reader->page;
4310                reader->page = *data_page;
4311                local_set(&reader->write, 0);
4312                local_set(&reader->entries, 0);
4313                reader->read = 0;
4314                *data_page = bpage;
4315
4316                /*
4317                 * Use the real_end for the data size,
4318                 * This gives us a chance to store the lost events
4319                 * on the page.
4320                 */
4321                if (reader->real_end)
4322                        local_set(&bpage->commit, reader->real_end);
4323        }
4324        ret = read;
4325
4326        cpu_buffer->lost_events = 0;
4327
4328        commit = local_read(&bpage->commit);
4329        /*
4330         * Set a flag in the commit field if we lost events
4331         */
4332        if (missed_events) {
4333                /* If there is room at the end of the page to save the
4334                 * missed events, then record it there.
4335                 */
4336                if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
4337                        memcpy(&bpage->data[commit], &missed_events,
4338                               sizeof(missed_events));
4339                        local_add(RB_MISSED_STORED, &bpage->commit);
4340                        commit += sizeof(missed_events);
4341                }
4342                local_add(RB_MISSED_EVENTS, &bpage->commit);
4343        }
4344
4345        /*
4346         * This page may be off to user land. Zero it out here.
4347         */
4348        if (commit < BUF_PAGE_SIZE)
4349                memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
4350
4351 out_unlock:
4352        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4353
4354 out:
4355        return ret;
4356}
4357EXPORT_SYMBOL_GPL(ring_buffer_read_page);
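
    /*
     * A fuller sketch of the page based read path than the example in the
     * kerneldoc above: allocate a spare page once, repeatedly pull pages
     * out of the buffer, and free the spare page when done.  Using
     * PAGE_SIZE for @len and the consume_page() callback are illustrative
     * assumptions.
     *
     *      void *rpage;
     *      int ret;
     *
     *      rpage = ring_buffer_alloc_read_page(buffer, cpu);
     *      if (!rpage)
     *              return -ENOMEM;
     *      while ((ret = ring_buffer_read_page(buffer, &rpage,
     *                                          PAGE_SIZE, cpu, 0)) >= 0)
     *              consume_page(rpage, ret);
     *      ring_buffer_free_read_page(buffer, rpage);
     */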
4358
4359#ifdef CONFIG_HOTPLUG_CPU
4360static int rb_cpu_notify(struct notifier_block *self,
4361                         unsigned long action, void *hcpu)
4362{
4363        struct ring_buffer *buffer =
4364                container_of(self, struct ring_buffer, cpu_notify);
4365        long cpu = (long)hcpu;
4366        int cpu_i, nr_pages_same;
4367        unsigned int nr_pages;
4368
4369        switch (action) {
4370        case CPU_UP_PREPARE:
4371        case CPU_UP_PREPARE_FROZEN:
4372                if (cpumask_test_cpu(cpu, buffer->cpumask))
4373                        return NOTIFY_OK;
4374
4375                nr_pages = 0;
4376                nr_pages_same = 1;
4377                /* check if all cpu sizes are same */
4378                for_each_buffer_cpu(buffer, cpu_i) {
4379                        /* fill in the size from first enabled cpu */
4380                        if (nr_pages == 0)
4381                                nr_pages = buffer->buffers[cpu_i]->nr_pages;
4382                        if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
4383                                nr_pages_same = 0;
4384                                break;
4385                        }
4386                }
4387                /* allocate minimum pages, user can later expand it */
4388                if (!nr_pages_same)
4389                        nr_pages = 2;
4390                buffer->buffers[cpu] =
4391                        rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
4392                if (!buffer->buffers[cpu]) {
4393                        WARN(1, "failed to allocate ring buffer on CPU %ld\n",
4394                             cpu);
4395                        return NOTIFY_OK;
4396                }
4397                smp_wmb();
4398                cpumask_set_cpu(cpu, buffer->cpumask);
4399                break;
4400        case CPU_DOWN_PREPARE:
4401        case CPU_DOWN_PREPARE_FROZEN:
4402                /*
4403                 * Do nothing.
4404                 *  If we were to free the buffer, then the user would
4405                 *  lose any trace that was in the buffer.
4406                 */
4407                break;
4408        default:
4409                break;
4410        }
4411        return NOTIFY_OK;
4412}
4413#endif
4414