linux/kernel/printk/printk_ringbuffer.c
   1// SPDX-License-Identifier: GPL-2.0
   2
   3#include <linux/kernel.h>
   4#include <linux/irqflags.h>
   5#include <linux/string.h>
   6#include <linux/errno.h>
   7#include <linux/bug.h>
   8#include "printk_ringbuffer.h"
   9
  10/**
  11 * DOC: printk_ringbuffer overview
  12 *
  13 * Data Structure
  14 * --------------
  15 * The printk_ringbuffer is made up of two internal ringbuffers:
  16 *
  17 *   desc_ring
  18 *     A ring of descriptors and their meta data (such as sequence number,
  19 *     timestamp, loglevel, etc.) as well as internal state information about
  20 *     the record and logical positions specifying where in the other
  21 *     ringbuffer the text strings are located.
  22 *
  23 *   text_data_ring
  24 *     A ring of data blocks. A data block consists of an unsigned long
  25 *     integer (ID) that maps to a desc_ring index followed by the text
  26 *     string of the record.
  27 *
  28 * The internal state information of a descriptor is the key element to allow
  29 * readers and writers to locklessly synchronize access to the data.
  30 *
  31 * Implementation
  32 * --------------
  33 *
  34 * Descriptor Ring
  35 * ~~~~~~~~~~~~~~~
  36 * The descriptor ring is an array of descriptors. A descriptor contains
  37 * essential meta data to track the data of a printk record using
  38 * blk_lpos structs pointing to associated text data blocks (see
  39 * "Data Rings" below). Each descriptor is assigned an ID that maps
  40 * directly to index values of the descriptor array and has a state. The ID
  41 * and the state are bitwise combined into a single descriptor field named
  42 * @state_var, allowing ID and state to be synchronously and atomically
  43 * updated.
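     *
     * For example, this is how an ID/state pair is packed and later
     * unpacked (a brief sketch using the DESC_SV(), DESC_ID() and
     * DESC_STATE() helpers from printk_ringbuffer.h)::
     *
     *      // pack a descriptor ID and state into one state_var value
     *      unsigned long val = DESC_SV(id, desc_reserved);
     *
     *      // recover both pieces from a single atomic read
     *      unsigned long sv = atomic_long_read(&desc->state_var);
     *      unsigned long read_id = DESC_ID(sv);
     *      enum desc_state state = DESC_STATE(sv);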
  44 *
  45 * Descriptors have four states:
  46 *
  47 *   reserved
  48 *     A writer is modifying the record.
  49 *
  50 *   committed
  51 *     The record and all its data are written. A writer can reopen the
  52 *     descriptor (transitioning it back to reserved), but in the committed
  53 *     state the data is consistent.
  54 *
  55 *   finalized
  56 *     The record and all its data are complete and available for reading. A
  57 *     writer cannot reopen the descriptor.
  58 *
  59 *   reusable
  60 *     The record exists, but its text and/or meta data may no longer be
  61 *     available.
  62 *
  63 * Querying the @state_var of a record requires providing the ID of the
  64 * descriptor to query. This can yield a possible fifth (pseudo) state:
  65 *
  66 *   miss
  67 *     The descriptor being queried has an unexpected ID.
  68 *
  69 * The descriptor ring has a @tail_id that contains the ID of the oldest
  70 * descriptor and @head_id that contains the ID of the newest descriptor.
  71 *
  72 * When a new descriptor should be created (and the ring is full), the tail
  73 * descriptor is invalidated by first transitioning to the reusable state and
  74 * then invalidating all tail data blocks up to and including the data block
  75 * associated with the tail descriptor (for the text ring). Then
  76 * @tail_id is advanced, followed by advancing @head_id. And finally the
  77 * @state_var of the new descriptor is initialized to the new ID and reserved
  78 * state.
  79 *
  80 * The @tail_id can only be advanced if the new @tail_id would be in the
  81 * finalized or reusable queried state. This guarantees that a valid
  82 * sequence number for the tail is always available.
  83 *
  84 * Descriptor Finalization
  85 * ~~~~~~~~~~~~~~~~~~~~~~~
  86 * When a writer calls the commit function prb_commit(), record data is
  87 * fully stored and is consistent within the ringbuffer. However, a writer can
  88 * reopen that record, claiming exclusive access (as with prb_reserve()), and
  89 * modify that record. When finished, the writer must again commit the record.
  90 *
  91 * In order for a record to be made available to readers (and also become
  92 * recyclable for writers), it must be finalized. A finalized record cannot be
  93 * reopened and can never become "unfinalized". Record finalization can occur
  94 * in three different scenarios:
  95 *
  96 *   1) A writer can simultaneously commit and finalize its record by calling
  97 *      prb_final_commit() instead of prb_commit().
  98 *
  99 *   2) When a new record is reserved and the previous record has been
 100 *      committed via prb_commit(), that previous record is automatically
 101 *      finalized.
 102 *
 103 *   3) When a record is committed via prb_commit() and a newer record
 104 *      already exists, the record being committed is automatically finalized.
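     *
     * A brief sketch of scenario 2, where reserving a second record
     * implicitly finalizes the first (@e1/@e2 and @r1/@r2 are placeholder
     * entries and records; test_rb is the example ringbuffer from the
     * "Usage" section below)::
     *
     *      prb_reserve(&e1, &test_rb, &r1);
     *      prb_commit(&e1);                  // committed, still reopenable
     *
     *      prb_reserve(&e2, &test_rb, &r2);  // first record is now finalized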
 105 *
 106 * Data Ring
 107 * ~~~~~~~~~
 108 * The text data ring is a byte array composed of data blocks. Data blocks are
 109 * referenced by blk_lpos structs that point to the logical position of the
 110 * beginning of a data block and the beginning of the next adjacent data
 111 * block. Logical positions are mapped directly to index values of the byte
 112 * array ringbuffer.
 113 *
 114 * Each data block consists of an ID followed by the writer data. The ID is
 115 * the identifier of a descriptor that is associated with the data block. A
 116 * given data block is considered valid if all of the following conditions
 117 * are met:
 118 *
 119 *   1) The descriptor associated with the data block is in the committed
 120 *      or finalized queried state.
 121 *
 122 *   2) The blk_lpos struct within the descriptor associated with the data
 123 *      block references back to the same data block.
 124 *
 125 *   3) The data block is within the head/tail logical position range.
 126 *
 127 * If the writer data of a data block would extend beyond the end of the
 128 * byte array, only the ID of the data block is stored at the logical
 129 * position and the full data block (ID and writer data) is stored at the
 130 * beginning of the byte array. The referencing blk_lpos will point to the
 131 * ID before the wrap and the next data block will be at the logical
 132 * position adjacent to the full data block after the wrap.
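     *
     * As a hypothetical example, with a 1 KiB data ring (size_bits = 10)
     * and a 16-byte data block reserved while the head is at lpos 1016::
     *
     *      blk_lpos->begin = 1016;       // only the block ID fits before the wrap
     *      blk_lpos->next  = 1024 + 16;  // the full block begins at index 0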
 133 *
 134 * The data ring has a @tail_lpos that points to the beginning of the oldest
 135 * data block and a @head_lpos that points to the logical position of the
 136 * next (not yet existing) data block.
 137 *
 138 * When a new data block should be created (and the ring is full), tail data
 139 * blocks will first be invalidated by putting their associated descriptors
 140 * into the reusable state and then pushing the @tail_lpos forward beyond
 141 * them. Then the @head_lpos is pushed forward and is associated with a new
 142 * descriptor. If a data block is not valid, the @tail_lpos cannot be
 143 * advanced beyond it.
 144 *
 145 * Info Array
 146 * ~~~~~~~~~~
 147 * The general meta data of printk records are stored in printk_info structs,
 148 * kept in an array with the same number of elements as the descriptor ring.
 149 * Each info corresponds to the descriptor of the same index in the
 150 * descriptor ring. Info validity is confirmed by evaluating the corresponding
 151 * descriptor before and after loading the info.
 152 *
 153 * Usage
 154 * -----
 155 * Here are some simple examples demonstrating writers and readers. For the
 156 * examples a global ringbuffer (test_rb) is available (which is not the
 157 * actual ringbuffer used by printk)::
 158 *
 159 *      DEFINE_PRINTKRB(test_rb, 15, 5);
 160 *
 161 * This ringbuffer allows up to 32768 records (2 ^ 15) and has a size of
 162 * 1 MiB (2 ^ (15 + 5)) for text data.
 163 *
 164 * Sample writer code::
 165 *
 166 *      const char *textstr = "message text";
 167 *      struct prb_reserved_entry e;
 168 *      struct printk_record r;
 169 *
 170 *      // specify how much to allocate
 171 *      prb_rec_init_wr(&r, strlen(textstr) + 1);
 172 *
 173 *      if (prb_reserve(&e, &test_rb, &r)) {
 174 *              snprintf(r.text_buf, r.text_buf_size, "%s", textstr);
 175 *
 176 *              r.info->text_len = strlen(textstr);
 177 *              r.info->ts_nsec = local_clock();
 178 *              r.info->caller_id = printk_caller_id();
 179 *
 180 *              // commit and finalize the record
 181 *              prb_final_commit(&e);
 182 *      }
 183 *
 184 * Note that additional writer functions are available to extend a record
 185 * after it has been committed but not yet finalized. This can be done as
 186 * long as no new records have been reserved and the caller is the same.
 187 *
 188 * Sample writer code (record extending)::
 189 *
 190 *              // alternate rest of previous example
 191 *
 192 *              r.info->text_len = strlen(textstr);
 193 *              r.info->ts_nsec = local_clock();
 194 *              r.info->caller_id = printk_caller_id();
 195 *
 196 *              // commit the record (but do not finalize yet)
 197 *              prb_commit(&e);
 198 *      }
 199 *
 200 *      ...
 201 *
 202 *      // specify additional 5 bytes text space to extend
 203 *      prb_rec_init_wr(&r, 5);
 204 *
 205 *      // try to extend, but only if it does not exceed 32 bytes
 206 *      if (prb_reserve_in_last(&e, &test_rb, &r, printk_caller_id(), 32)) {
 207 *              snprintf(&r.text_buf[r.info->text_len],
 208 *                       r.text_buf_size - r.info->text_len, "hello");
 209 *
 210 *              r.info->text_len += 5;
 211 *
 212 *              // commit and finalize the record
 213 *              prb_final_commit(&e);
 214 *      }
 215 *
 216 * Sample reader code::
 217 *
 218 *      struct printk_info info;
 219 *      struct printk_record r;
 220 *      char text_buf[32];
 221 *      u64 seq;
 222 *
 223 *      prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf));
 224 *
 225 *      prb_for_each_record(0, &test_rb, &seq, &r) {
 226 *              if (info.seq != seq)
 227 *                      pr_warn("lost %llu records\n", info.seq - seq);
 228 *
 229 *              if (info.text_len > r.text_buf_size) {
 230 *                      pr_warn("record %llu text truncated\n", info.seq);
 231 *                      text_buf[r.text_buf_size - 1] = 0;
 232 *              }
 233 *
 234 *              pr_info("%llu: %llu: %s\n", info.seq, info.ts_nsec,
 235 *                      &text_buf[0]);
 236 *      }
 237 *
 238 * Note that additional (less convenient) reader functions are available to
 239 * allow more complex record access.
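     *
     * For example, prb_read_valid() reads a single record for a
     * caller-chosen sequence number (a brief sketch reusing @r, @info and
     * @text_buf from the reader example above)::
     *
     *      // read one specific record, if it is still available
     *      if (prb_read_valid(&test_rb, seq, &r))
     *              pr_info("%llu: %s\n", info.seq, &text_buf[0]);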
 240 *
 241 * ABA Issues
 242 * ~~~~~~~~~~
 243 * To help avoid ABA issues, descriptors are referenced by IDs (array index
 244 * values combined with tagged bits counting array wraps) and data blocks are
 245 * referenced by logical positions (array index values combined with tagged
 246 * bits counting array wraps). However, on 32-bit systems the number of
 247 * tagged bits is relatively small such that an ABA incident is (at least
 248 * theoretically) possible. For example, if 4 million maximally sized (1KiB)
 249 * printk messages were to occur in NMI context on a 32-bit system, the
 250 * interrupted context would not be able to recognize that the 32-bit integer
 251 * completely wrapped and thus represents a different data block than the one
 252 * the interrupted context expects.
 253 *
 254 * To help combat this possibility, additional state checking is performed
 255 * (such as using cmpxchg() even though set() would suffice). These extra
 256 * checks are commented as such and will hopefully catch any ABA issue that
 257 * a 32-bit system might experience.
 258 *
 259 * Memory Barriers
 260 * ~~~~~~~~~~~~~~~
 261 * Multiple memory barriers are used. To simplify proving correctness and
 262 * generating litmus tests, lines of code related to memory barriers
 263 * (loads, stores, and the associated memory barriers) are labeled::
 264 *
 265 *      LMM(function:letter)
 266 *
 267 * Comments reference the labels using only the "function:letter" part.
 268 *
 269 * The memory barrier pairs and their ordering are:
 270 *
 271 *   desc_reserve:D / desc_reserve:B
 272 *     push descriptor tail (id), then push descriptor head (id)
 273 *
 274 *   desc_reserve:D / data_push_tail:B
 275 *     push data tail (lpos), then set new descriptor reserved (state)
 276 *
 277 *   desc_reserve:D / desc_push_tail:C
 278 *     push descriptor tail (id), then set new descriptor reserved (state)
 279 *
 280 *   desc_reserve:D / prb_first_seq:C
 281 *     push descriptor tail (id), then set new descriptor reserved (state)
 282 *
 283 *   desc_reserve:F / desc_read:D
 284 *     set new descriptor id and reserved (state), then allow writer changes
 285 *
 286 *   data_alloc:A (or data_realloc:A) / desc_read:D
 287 *     set old descriptor reusable (state), then modify new data block area
 288 *
 289 *   data_alloc:A (or data_realloc:A) / data_push_tail:B
 290 *     push data tail (lpos), then modify new data block area
 291 *
 292 *   _prb_commit:B / desc_read:B
 293 *     store writer changes, then set new descriptor committed (state)
 294 *
 295 *   desc_reopen_last:A / _prb_commit:B
 296 *     set descriptor reserved (state), then read descriptor data
 297 *
 298 *   _prb_commit:B / desc_reserve:D
 299 *     set new descriptor committed (state), then check descriptor head (id)
 300 *
 301 *   data_push_tail:D / data_push_tail:A
 302 *     set descriptor reusable (state), then push data tail (lpos)
 303 *
 304 *   desc_push_tail:B / desc_reserve:D
 305 *     set descriptor reusable (state), then push descriptor tail (id)
 306 */
 307
 308#define DATA_SIZE(data_ring)            _DATA_SIZE((data_ring)->size_bits)
 309#define DATA_SIZE_MASK(data_ring)       (DATA_SIZE(data_ring) - 1)
 310
 311#define DESCS_COUNT(desc_ring)          _DESCS_COUNT((desc_ring)->count_bits)
 312#define DESCS_COUNT_MASK(desc_ring)     (DESCS_COUNT(desc_ring) - 1)
 313
 314/* Determine the data array index from a logical position. */
 315#define DATA_INDEX(data_ring, lpos)     ((lpos) & DATA_SIZE_MASK(data_ring))
 316
 317/* Determine the desc array index from an ID or sequence number. */
 318#define DESC_INDEX(desc_ring, n)        ((n) & DESCS_COUNT_MASK(desc_ring))
 319
 320/* Determine how many times the data array has wrapped. */
 321#define DATA_WRAPS(data_ring, lpos)     ((lpos) >> (data_ring)->size_bits)
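
    /*
     * Example (hypothetical size_bits = 10, i.e. a 1 KiB data array):
     * lpos 1032 maps to array index 8 (DATA_INDEX) and has wrapped the
     * data array once (DATA_WRAPS).
     */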
 322
 323/* Determine if a logical position refers to a data-less block. */
 324#define LPOS_DATALESS(lpos)             ((lpos) & 1UL)
 325#define BLK_DATALESS(blk)               (LPOS_DATALESS((blk)->begin) && \
 326                                         LPOS_DATALESS((blk)->next))
 327
 328/* Get the logical position at index 0 of the current wrap. */
 329#define DATA_THIS_WRAP_START_LPOS(data_ring, lpos) \
 330((lpos) & ~DATA_SIZE_MASK(data_ring))
 331
 332/* Get the ID for the same index of the previous wrap as the given ID. */
 333#define DESC_ID_PREV_WRAP(desc_ring, id) \
 334DESC_ID((id) - DESCS_COUNT(desc_ring))
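
    /*
     * Example (hypothetical count_bits = 15, i.e. 32768 descriptors): the
     * descriptor that occupied the same array slot one wrap before ID 40000
     * had ID 40000 - 32768 = 7232.
     */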
 335
 336/*
 337 * A data block: mapped directly to the beginning of the data block area
 338 * specified as a logical position within the data ring.
 339 *
 340 * @id:   the ID of the associated descriptor
 341 * @data: the writer data
 342 *
 343 * Note that the size of a data block is only known by its associated
 344 * descriptor.
 345 */
 346struct prb_data_block {
 347        unsigned long   id;
 348        char            data[];
 349};
 350
 351/*
 352 * Return the descriptor associated with @n. @n can be either a
 353 * descriptor ID or a sequence number.
 354 */
 355static struct prb_desc *to_desc(struct prb_desc_ring *desc_ring, u64 n)
 356{
 357        return &desc_ring->descs[DESC_INDEX(desc_ring, n)];
 358}
 359
 360/*
 361 * Return the printk_info associated with @n. @n can be either a
 362 * descriptor ID or a sequence number.
 363 */
 364static struct printk_info *to_info(struct prb_desc_ring *desc_ring, u64 n)
 365{
 366        return &desc_ring->infos[DESC_INDEX(desc_ring, n)];
 367}
 368
 369static struct prb_data_block *to_block(struct prb_data_ring *data_ring,
 370                                       unsigned long begin_lpos)
 371{
 372        return (void *)&data_ring->data[DATA_INDEX(data_ring, begin_lpos)];
 373}
 374
 375/*
 376 * Increase the data size to account for data block meta data plus any
 377 * padding so that the adjacent data block is aligned on the ID size.
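     *
     * For example, on a 64-bit system a 13-byte message becomes a 24-byte
     * block: 13 bytes plus an 8-byte ID, aligned up to a multiple of 8.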
 378 */
 379static unsigned int to_blk_size(unsigned int size)
 380{
 381        struct prb_data_block *db = NULL;
 382
 383        size += sizeof(*db);
 384        size = ALIGN(size, sizeof(db->id));
 385        return size;
 386}
 387
 388/*
 389 * Sanity checker for reserve size. The ringbuffer code assumes that a data
 390 * block does not exceed the maximum possible size that could fit within the
 391 * ringbuffer. This function provides that basic size check so that the
 392 * assumption is safe.
 393 */
 394static bool data_check_size(struct prb_data_ring *data_ring, unsigned int size)
 395{
 396        struct prb_data_block *db = NULL;
 397
 398        if (size == 0)
 399                return true;
 400
 401        /*
 402         * Ensure the alignment padded size could possibly fit in the data
 403         * array. The largest possible data block must still leave room for
 404         * at least the ID of the next block.
 405         */
 406        size = to_blk_size(size);
 407        if (size > DATA_SIZE(data_ring) - sizeof(db->id))
 408                return false;
 409
 410        return true;
 411}
 412
 413/* Query the state of a descriptor. */
 414static enum desc_state get_desc_state(unsigned long id,
 415                                      unsigned long state_val)
 416{
 417        if (id != DESC_ID(state_val))
 418                return desc_miss;
 419
 420        return DESC_STATE(state_val);
 421}
 422
 423/*
 424 * Get a copy of a specified descriptor and return its queried state. If the
 425 * descriptor is in an inconsistent state (miss or reserved), the caller can
 426 * only expect the descriptor's @state_var field to be valid.
 427 *
 428 * The sequence number and caller_id can be optionally retrieved. Like all
 429 * non-state_var data, they are only valid if the descriptor is in a
 430 * consistent state.
 431 */
 432static enum desc_state desc_read(struct prb_desc_ring *desc_ring,
 433                                 unsigned long id, struct prb_desc *desc_out,
 434                                 u64 *seq_out, u32 *caller_id_out)
 435{
 436        struct printk_info *info = to_info(desc_ring, id);
 437        struct prb_desc *desc = to_desc(desc_ring, id);
 438        atomic_long_t *state_var = &desc->state_var;
 439        enum desc_state d_state;
 440        unsigned long state_val;
 441
 442        /* Check the descriptor state. */
 443        state_val = atomic_long_read(state_var); /* LMM(desc_read:A) */
 444        d_state = get_desc_state(id, state_val);
 445        if (d_state == desc_miss || d_state == desc_reserved) {
 446                /*
 447                 * The descriptor is in an inconsistent state. Set at least
 448                 * @state_var so that the caller can see the details of
 449                 * the inconsistent state.
 450                 */
 451                goto out;
 452        }
 453
 454        /*
 455         * Guarantee the state is loaded before copying the descriptor
 456         * content. This avoids copying obsolete descriptor content that might
 457         * not apply to the descriptor state. This pairs with _prb_commit:B.
 458         *
 459         * Memory barrier involvement:
 460         *
 461         * If desc_read:A reads from _prb_commit:B, then desc_read:C reads
 462         * from _prb_commit:A.
 463         *
 464         * Relies on:
 465         *
 466         * WMB from _prb_commit:A to _prb_commit:B
 467         *    matching
 468         * RMB from desc_read:A to desc_read:C
 469         */
 470        smp_rmb(); /* LMM(desc_read:B) */
 471
 472        /*
 473         * Copy the descriptor data. The data is not valid until the
 474         * state has been re-checked. A memcpy() for all of @desc
 475         * cannot be used because of the atomic_t @state_var field.
 476         */
 477        memcpy(&desc_out->text_blk_lpos, &desc->text_blk_lpos,
 478               sizeof(desc_out->text_blk_lpos)); /* LMM(desc_read:C) */
 479        if (seq_out)
 480                *seq_out = info->seq; /* also part of desc_read:C */
 481        if (caller_id_out)
 482                *caller_id_out = info->caller_id; /* also part of desc_read:C */
 483
 484        /*
 485         * 1. Guarantee the descriptor content is loaded before re-checking
 486         *    the state. This avoids reading an obsolete descriptor state
 487         *    that may not apply to the copied content. This pairs with
 488         *    desc_reserve:F.
 489         *
 490         *    Memory barrier involvement:
 491         *
 492         *    If desc_read:C reads from desc_reserve:G, then desc_read:E
 493         *    reads from desc_reserve:F.
 494         *
 495         *    Relies on:
 496         *
 497         *    WMB from desc_reserve:F to desc_reserve:G
 498         *       matching
 499         *    RMB from desc_read:C to desc_read:E
 500         *
 501         * 2. Guarantee the record data is loaded before re-checking the
 502         *    state. This avoids reading an obsolete descriptor state that may
 503         *    not apply to the copied data. This pairs with data_alloc:A and
 504         *    data_realloc:A.
 505         *
 506         *    Memory barrier involvement:
 507         *
 508         *    If copy_data:A reads from data_alloc:B, then desc_read:E
 509         *    reads from desc_make_reusable:A.
 510         *
 511         *    Relies on:
 512         *
 513         *    MB from desc_make_reusable:A to data_alloc:B
 514         *       matching
 515         *    RMB from desc_read:C to desc_read:E
 516         *
 517         *    Note: desc_make_reusable:A and data_alloc:B can be different
 518         *          CPUs. However, the data_alloc:B CPU (which performs the
 519         *          full memory barrier) must have previously seen
 520         *          desc_make_reusable:A.
 521         */
 522        smp_rmb(); /* LMM(desc_read:D) */
 523
 524        /*
 525         * The data has been copied. Return the current descriptor state,
 526         * which may have changed since the load above.
 527         */
 528        state_val = atomic_long_read(state_var); /* LMM(desc_read:E) */
 529        d_state = get_desc_state(id, state_val);
 530out:
 531        atomic_long_set(&desc_out->state_var, state_val);
 532        return d_state;
 533}
 534
 535/*
 536 * Take a specified descriptor out of the finalized state by attempting
 537 * the transition from finalized to reusable. Either this context or some
 538 * other context will have been successful.
 539 */
 540static void desc_make_reusable(struct prb_desc_ring *desc_ring,
 541                               unsigned long id)
 542{
 543        unsigned long val_finalized = DESC_SV(id, desc_finalized);
 544        unsigned long val_reusable = DESC_SV(id, desc_reusable);
 545        struct prb_desc *desc = to_desc(desc_ring, id);
 546        atomic_long_t *state_var = &desc->state_var;
 547
 548        atomic_long_cmpxchg_relaxed(state_var, val_finalized,
 549                                    val_reusable); /* LMM(desc_make_reusable:A) */
 550}
 551
 552/*
 553 * Given the text data ring, put the associated descriptor of each
 554 * data block from @lpos_begin until @lpos_end into the reusable state.
 555 *
 556 * If there is any problem making the associated descriptor reusable, either
 557 * the descriptor has not yet been finalized or another writer context has
 558 * already pushed the tail lpos past the problematic data block. Regardless,
 559 * on error the caller can re-load the tail lpos to determine the situation.
 560 */
 561static bool data_make_reusable(struct printk_ringbuffer *rb,
 562                               struct prb_data_ring *data_ring,
 563                               unsigned long lpos_begin,
 564                               unsigned long lpos_end,
 565                               unsigned long *lpos_out)
 566{
 567        struct prb_desc_ring *desc_ring = &rb->desc_ring;
 568        struct prb_data_block *blk;
 569        enum desc_state d_state;
 570        struct prb_desc desc;
 571        struct prb_data_blk_lpos *blk_lpos = &desc.text_blk_lpos;
 572        unsigned long id;
 573
 574        /* Loop until @lpos_begin has advanced to or beyond @lpos_end. */
 575        while ((lpos_end - lpos_begin) - 1 < DATA_SIZE(data_ring)) {
 576                blk = to_block(data_ring, lpos_begin);
 577
 578                /*
 579                 * Load the block ID from the data block. This is a data race
 580                 * against a writer that may have newly reserved this data
 581                 * area. If the loaded value matches a valid descriptor ID,
 582                 * the blk_lpos of that descriptor will be checked to make
 583                 * sure it points back to this data block. If the check fails,
 584                 * the data area has been recycled by another writer.
 585                 */
 586                id = blk->id; /* LMM(data_make_reusable:A) */
 587
 588                d_state = desc_read(desc_ring, id, &desc,
 589                                    NULL, NULL); /* LMM(data_make_reusable:B) */
 590
 591                switch (d_state) {
 592                case desc_miss:
 593                case desc_reserved:
 594                case desc_committed:
 595                        return false;
 596                case desc_finalized:
 597                        /*
 598                         * This data block is invalid if the descriptor
 599                         * does not point back to it.
 600                         */
 601                        if (blk_lpos->begin != lpos_begin)
 602                                return false;
 603                        desc_make_reusable(desc_ring, id);
 604                        break;
 605                case desc_reusable:
 606                        /*
 607                         * This data block is invalid if the descriptor
 608                         * does not point back to it.
 609                         */
 610                        if (blk_lpos->begin != lpos_begin)
 611                                return false;
 612                        break;
 613                }
 614
 615                /* Advance @lpos_begin to the next data block. */
 616                lpos_begin = blk_lpos->next;
 617        }
 618
 619        *lpos_out = lpos_begin;
 620        return true;
 621}
 622
 623/*
 624 * Advance the data ring tail to at least @lpos. This function puts
 625 * descriptors into the reusable state if the tail is pushed beyond
 626 * their associated data block.
 627 */
 628static bool data_push_tail(struct printk_ringbuffer *rb,
 629                           struct prb_data_ring *data_ring,
 630                           unsigned long lpos)
 631{
 632        unsigned long tail_lpos_new;
 633        unsigned long tail_lpos;
 634        unsigned long next_lpos;
 635
 636        /* If @lpos is from a data-less block, there is nothing to do. */
 637        if (LPOS_DATALESS(lpos))
 638                return true;
 639
 640        /*
 641         * Any descriptor states that have transitioned to reusable due to the
 642         * data tail being pushed to this loaded value will be visible to this
 643         * CPU. This pairs with data_push_tail:D.
 644         *
 645         * Memory barrier involvement:
 646         *
 647         * If data_push_tail:A reads from data_push_tail:D, then this CPU can
 648         * see desc_make_reusable:A.
 649         *
 650         * Relies on:
 651         *
 652         * MB from desc_make_reusable:A to data_push_tail:D
 653         *    matches
 654         * READFROM from data_push_tail:D to data_push_tail:A
 655         *    thus
 656         * READFROM from desc_make_reusable:A to this CPU
 657         */
 658        tail_lpos = atomic_long_read(&data_ring->tail_lpos); /* LMM(data_push_tail:A) */
 659
 660        /*
 661         * Loop until the tail lpos is at or beyond @lpos. This condition
 662         * may already be satisfied, resulting in no full memory barrier
 663         * from data_push_tail:D being performed. However, since this CPU
 664         * sees the new tail lpos, any descriptor states that transitioned to
 665         * the reusable state must already be visible.
 666         */
 667        while ((lpos - tail_lpos) - 1 < DATA_SIZE(data_ring)) {
 668                /*
 669                 * Make all descriptors reusable that are associated with
 670                 * data blocks before @lpos.
 671                 */
 672                if (!data_make_reusable(rb, data_ring, tail_lpos, lpos,
 673                                        &next_lpos)) {
 674                        /*
 675                         * 1. Guarantee the block ID loaded in
 676                         *    data_make_reusable() is performed before
 677                         *    reloading the tail lpos. The failed
 678                         *    data_make_reusable() may be due to a newly
 679                         *    recycled data area causing the tail lpos to
 680                         *    have been previously pushed. This pairs with
 681                         *    data_alloc:A and data_realloc:A.
 682                         *
 683                         *    Memory barrier involvement:
 684                         *
 685                         *    If data_make_reusable:A reads from data_alloc:B,
 686                         *    then data_push_tail:C reads from
 687                         *    data_push_tail:D.
 688                         *
 689                         *    Relies on:
 690                         *
 691                         *    MB from data_push_tail:D to data_alloc:B
 692                         *       matching
 693                         *    RMB from data_make_reusable:A to
 694                         *    data_push_tail:C
 695                         *
 696                         *    Note: data_push_tail:D and data_alloc:B can be
 697                         *          different CPUs. However, the data_alloc:B
 698                         *          CPU (which performs the full memory
 699                         *          barrier) must have previously seen
 700                         *          data_push_tail:D.
 701                         *
 702                         * 2. Guarantee the descriptor state loaded in
 703                         *    data_make_reusable() is performed before
 704                         *    reloading the tail lpos. The failed
 705                         *    data_make_reusable() may be due to a newly
 706                         *    recycled descriptor causing the tail lpos to
 707                         *    have been previously pushed. This pairs with
 708                         *    desc_reserve:D.
 709                         *
 710                         *    Memory barrier involvement:
 711                         *
 712                         *    If data_make_reusable:B reads from
 713                         *    desc_reserve:F, then data_push_tail:C reads
 714                         *    from data_push_tail:D.
 715                         *
 716                         *    Relies on:
 717                         *
 718                         *    MB from data_push_tail:D to desc_reserve:F
 719                         *       matching
 720                         *    RMB from data_make_reusable:B to
 721                         *    data_push_tail:C
 722                         *
 723                         *    Note: data_push_tail:D and desc_reserve:F can
 724                         *          be different CPUs. However, the
 725                         *          desc_reserve:F CPU (which performs the
 726                         *          full memory barrier) must have previously
 727                         *          seen data_push_tail:D.
 728                         */
 729                        smp_rmb(); /* LMM(data_push_tail:B) */
 730
 731                        tail_lpos_new = atomic_long_read(&data_ring->tail_lpos
 732                                                        ); /* LMM(data_push_tail:C) */
 733                        if (tail_lpos_new == tail_lpos)
 734                                return false;
 735
 736                        /* Another CPU pushed the tail. Try again. */
 737                        tail_lpos = tail_lpos_new;
 738                        continue;
 739                }
 740
 741                /*
 742                 * Guarantee any descriptor states that have transitioned to
 743                 * reusable are stored before pushing the tail lpos. A full
 744                 * memory barrier is needed since other CPUs may have made
 745                 * the descriptor states reusable. This pairs with
 746                 * data_push_tail:A.
 747                 */
 748                if (atomic_long_try_cmpxchg(&data_ring->tail_lpos, &tail_lpos,
 749                                            next_lpos)) { /* LMM(data_push_tail:D) */
 750                        break;
 751                }
 752        }
 753
 754        return true;
 755}
 756
 757/*
 758 * Advance the desc ring tail. This function advances the tail by one
 759 * descriptor, thus invalidating the oldest descriptor. Before advancing
 760 * the tail, the tail descriptor is made reusable and all data blocks up to
 761 * and including the descriptor's data block are invalidated (i.e. the data
 762 * ring tail is pushed past the data block of the descriptor being made
 763 * reusable).
 764 */
 765static bool desc_push_tail(struct printk_ringbuffer *rb,
 766                           unsigned long tail_id)
 767{
 768        struct prb_desc_ring *desc_ring = &rb->desc_ring;
 769        enum desc_state d_state;
 770        struct prb_desc desc;
 771
 772        d_state = desc_read(desc_ring, tail_id, &desc, NULL, NULL);
 773
 774        switch (d_state) {
 775        case desc_miss:
 776                /*
 777                 * If the ID is exactly 1 wrap behind the expected, it is
 778                 * in the process of being reserved by another writer and
 779                 * must be considered reserved.
 780                 */
 781                if (DESC_ID(atomic_long_read(&desc.state_var)) ==
 782                    DESC_ID_PREV_WRAP(desc_ring, tail_id)) {
 783                        return false;
 784                }
 785
 786                /*
 787                 * The ID has changed. Another writer must have pushed the
 788                 * tail and recycled the descriptor already. Success is
 789                 * returned because the caller is only interested in the
 790                 * specified tail being pushed, which it was.
 791                 */
 792                return true;
 793        case desc_reserved:
 794        case desc_committed:
 795                return false;
 796        case desc_finalized:
 797                desc_make_reusable(desc_ring, tail_id);
 798                break;
 799        case desc_reusable:
 800                break;
 801        }
 802
 803        /*
 804         * Data blocks must be invalidated before their associated
 805         * descriptor can be made available for recycling. Invalidating
 806         * them later is not possible because there is no way to trust
 807         * data blocks once their associated descriptor is gone.
 808         */
 809
 810        if (!data_push_tail(rb, &rb->text_data_ring, desc.text_blk_lpos.next))
 811                return false;
 812
 813        /*
 814         * Check the next descriptor after @tail_id before pushing the tail
 815         * to it because the tail must always be in a finalized or reusable
 816         * state. The implementation of prb_first_seq() relies on this.
 817         *
 818         * A successful read implies that the next descriptor is less than or
 819         * equal to @head_id so there is no risk of pushing the tail past the
 820         * head.
 821         */
 822        d_state = desc_read(desc_ring, DESC_ID(tail_id + 1), &desc,
 823                            NULL, NULL); /* LMM(desc_push_tail:A) */
 824
 825        if (d_state == desc_finalized || d_state == desc_reusable) {
 826                /*
 827                 * Guarantee any descriptor states that have transitioned to
 828                 * reusable are stored before pushing the tail ID. This allows
 829                 * verifying the recycled descriptor state. A full memory
 830                 * barrier is needed since other CPUs may have made the
 831                 * descriptor states reusable. This pairs with desc_reserve:D.
 832                 */
 833                atomic_long_cmpxchg(&desc_ring->tail_id, tail_id,
 834                                    DESC_ID(tail_id + 1)); /* LMM(desc_push_tail:B) */
 835        } else {
 836                /*
 837                 * Guarantee the last state load from desc_read() is before
 838                 * reloading @tail_id in order to see a new tail ID in the
 839                 * case that the descriptor has been recycled. This pairs
 840                 * with desc_reserve:D.
 841                 *
 842                 * Memory barrier involvement:
 843                 *
 844                 * If desc_push_tail:A reads from desc_reserve:F, then
 845                 * desc_push_tail:D reads from desc_push_tail:B.
 846                 *
 847                 * Relies on:
 848                 *
 849                 * MB from desc_push_tail:B to desc_reserve:F
 850                 *    matching
 851                 * RMB from desc_push_tail:A to desc_push_tail:D
 852                 *
 853                 * Note: desc_push_tail:B and desc_reserve:F can be different
 854                 *       CPUs. However, the desc_reserve:F CPU (which performs
 855                 *       the full memory barrier) must have previously seen
 856                 *       desc_push_tail:B.
 857                 */
 858                smp_rmb(); /* LMM(desc_push_tail:C) */
 859
 860                /*
 861                 * Re-check the tail ID. The descriptor following @tail_id is
 862                 * not in an allowed tail state. But if the tail has since
 863                 * been moved by another CPU, then it does not matter.
 864                 */
 865                if (atomic_long_read(&desc_ring->tail_id) == tail_id) /* LMM(desc_push_tail:D) */
 866                        return false;
 867        }
 868
 869        return true;
 870}
 871
 872/* Reserve a new descriptor, invalidating the oldest if necessary. */
 873static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out)
 874{
 875        struct prb_desc_ring *desc_ring = &rb->desc_ring;
 876        unsigned long prev_state_val;
 877        unsigned long id_prev_wrap;
 878        struct prb_desc *desc;
 879        unsigned long head_id;
 880        unsigned long id;
 881
 882        head_id = atomic_long_read(&desc_ring->head_id); /* LMM(desc_reserve:A) */
 883
 884        do {
 885                id = DESC_ID(head_id + 1);
 886                id_prev_wrap = DESC_ID_PREV_WRAP(desc_ring, id);
 887
 888                /*
 889                 * Guarantee the head ID is read before reading the tail ID.
 890                 * Since the tail ID is updated before the head ID, this
 891                 * guarantees that @id_prev_wrap is never ahead of the tail
 892                 * ID. This pairs with desc_reserve:D.
 893                 *
 894                 * Memory barrier involvement:
 895                 *
 896                 * If desc_reserve:A reads from desc_reserve:D, then
 897                 * desc_reserve:C reads from desc_push_tail:B.
 898                 *
 899                 * Relies on:
 900                 *
 901                 * MB from desc_push_tail:B to desc_reserve:D
 902                 *    matching
 903                 * RMB from desc_reserve:A to desc_reserve:C
 904                 *
 905                 * Note: desc_push_tail:B and desc_reserve:D can be different
 906                 *       CPUs. However, the desc_reserve:D CPU (which performs
 907                 *       the full memory barrier) must have previously seen
 908                 *       desc_push_tail:B.
 909                 */
 910                smp_rmb(); /* LMM(desc_reserve:B) */
 911
 912                if (id_prev_wrap == atomic_long_read(&desc_ring->tail_id
 913                                                    )) { /* LMM(desc_reserve:C) */
 914                        /*
 915                         * Make space for the new descriptor by
 916                         * advancing the tail.
 917                         */
 918                        if (!desc_push_tail(rb, id_prev_wrap))
 919                                return false;
 920                }
 921
 922                /*
 923                 * 1. Guarantee the tail ID is read before validating the
 924                 *    recycled descriptor state. A read memory barrier is
 925                 *    sufficient for this. This pairs with desc_push_tail:B.
 926                 *
 927                 *    Memory barrier involvement:
 928                 *
 929                 *    If desc_reserve:C reads from desc_push_tail:B, then
 930                 *    desc_reserve:E reads from desc_make_reusable:A.
 931                 *
 932                 *    Relies on:
 933                 *
 934                 *    MB from desc_make_reusable:A to desc_push_tail:B
 935                 *       matching
 936                 *    RMB from desc_reserve:C to desc_reserve:E
 937                 *
 938                 *    Note: desc_make_reusable:A and desc_push_tail:B can be
 939                 *          different CPUs. However, the desc_push_tail:B CPU
 940                 *          (which performs the full memory barrier) must have
 941                 *          previously seen desc_make_reusable:A.
 942                 *
 943                 * 2. Guarantee the tail ID is stored before storing the head
 944                 *    ID. This pairs with desc_reserve:B.
 945                 *
 946                 * 3. Guarantee any data ring tail changes are stored before
 947                 *    recycling the descriptor. Data ring tail changes can
 948                 *    happen via desc_push_tail()->data_push_tail(). A full
 949                 *    memory barrier is needed since another CPU may have
 950                 *    pushed the data ring tails. This pairs with
 951                 *    data_push_tail:B.
 952                 *
 953                 * 4. Guarantee a new tail ID is stored before recycling the
 954                 *    descriptor. A full memory barrier is needed since
 955                 *    another CPU may have pushed the tail ID. This pairs
 956                 *    with desc_push_tail:C and this also pairs with
 957                 *    prb_first_seq:C.
 958                 *
 959                 * 5. Guarantee the head ID is stored before trying to
 960                 *    finalize the previous descriptor. This pairs with
 961                 *    _prb_commit:B.
 962                 */
 963        } while (!atomic_long_try_cmpxchg(&desc_ring->head_id, &head_id,
 964                                          id)); /* LMM(desc_reserve:D) */
 965
 966        desc = to_desc(desc_ring, id);
 967
 968        /*
 969         * If the descriptor has been recycled, verify the old state val.
 970         * See "ABA Issues" about why this verification is performed.
 971         */
 972        prev_state_val = atomic_long_read(&desc->state_var); /* LMM(desc_reserve:E) */
 973        if (prev_state_val &&
 974            get_desc_state(id_prev_wrap, prev_state_val) != desc_reusable) {
 975                WARN_ON_ONCE(1);
 976                return false;
 977        }
 978
 979        /*
 980         * Assign the descriptor a new ID and set its state to reserved.
 981         * See "ABA Issues" about why cmpxchg() instead of set() is used.
 982         *
 983         * Guarantee the new descriptor ID and state is stored before making
 984         * any other changes. A write memory barrier is sufficient for this.
 985         * This pairs with desc_read:D.
 986         */
 987        if (!atomic_long_try_cmpxchg(&desc->state_var, &prev_state_val,
 988                        DESC_SV(id, desc_reserved))) { /* LMM(desc_reserve:F) */
 989                WARN_ON_ONCE(1);
 990                return false;
 991        }
 992
 993        /* Now data in @desc can be modified: LMM(desc_reserve:G) */
 994
 995        *id_out = id;
 996        return true;
 997}
 998
 999/* Determine the end of a data block. */
1000static unsigned long get_next_lpos(struct prb_data_ring *data_ring,
1001                                   unsigned long lpos, unsigned int size)
1002{
1003        unsigned long begin_lpos;
1004        unsigned long next_lpos;
1005
1006        begin_lpos = lpos;
1007        next_lpos = lpos + size;
1008
1009        /* First check if the data block does not wrap. */
1010        if (DATA_WRAPS(data_ring, begin_lpos) == DATA_WRAPS(data_ring, next_lpos))
1011                return next_lpos;
1012
1013        /* Wrapping data blocks store their data at the beginning. */
1014        return (DATA_THIS_WRAP_START_LPOS(data_ring, next_lpos) + size);
1015}
1016
1017/*
1018 * Allocate a new data block, invalidating the oldest data block(s)
1019 * if necessary. This function also associates the data block with
1020 * a specified descriptor.
1021 */
1022static char *data_alloc(struct printk_ringbuffer *rb,
1023                        struct prb_data_ring *data_ring, unsigned int size,
1024                        struct prb_data_blk_lpos *blk_lpos, unsigned long id)
1025{
1026        struct prb_data_block *blk;
1027        unsigned long begin_lpos;
1028        unsigned long next_lpos;
1029
1030        if (size == 0) {
1031                /* Specify a data-less block. */
1032                blk_lpos->begin = NO_LPOS;
1033                blk_lpos->next = NO_LPOS;
1034                return NULL;
1035        }
1036
1037        size = to_blk_size(size);
1038
1039        begin_lpos = atomic_long_read(&data_ring->head_lpos);
1040
1041        do {
1042                next_lpos = get_next_lpos(data_ring, begin_lpos, size);
1043
1044                if (!data_push_tail(rb, data_ring, next_lpos - DATA_SIZE(data_ring))) {
1045                        /* Failed to allocate, specify a data-less block. */
1046                        blk_lpos->begin = FAILED_LPOS;
1047                        blk_lpos->next = FAILED_LPOS;
1048                        return NULL;
1049                }
1050
1051                /*
1052                 * 1. Guarantee any descriptor states that have transitioned
1053                 *    to reusable are stored before modifying the newly
1054                 *    allocated data area. A full memory barrier is needed
1055                 *    since other CPUs may have made the descriptor states
1056                 *    reusable. See data_push_tail:A about why the reusable
1057                 *    states are visible. This pairs with desc_read:D.
1058                 *
1059                 * 2. Guarantee any updated tail lpos is stored before
1060                 *    modifying the newly allocated data area. Another CPU may
1061                 *    be in data_make_reusable() and is reading a block ID
1062                 *    from this area. data_make_reusable() can handle reading
1063                 *    a garbage block ID value, but then it must be able to
1064                 *    load a new tail lpos. A full memory barrier is needed
1065                 *    since other CPUs may have updated the tail lpos. This
1066                 *    pairs with data_push_tail:B.
1067                 */
1068        } while (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &begin_lpos,
1069                                          next_lpos)); /* LMM(data_alloc:A) */
1070
1071        blk = to_block(data_ring, begin_lpos);
1072        blk->id = id; /* LMM(data_alloc:B) */
1073
1074        if (DATA_WRAPS(data_ring, begin_lpos) != DATA_WRAPS(data_ring, next_lpos)) {
1075                /* Wrapping data blocks store their data at the beginning. */
1076                blk = to_block(data_ring, 0);
1077
1078                /*
1079                 * Store the ID on the wrapped block for consistency.
1080                 * The printk_ringbuffer does not actually use it.
1081                 */
1082                blk->id = id;
1083        }
1084
1085        blk_lpos->begin = begin_lpos;
1086        blk_lpos->next = next_lpos;
1087
1088        return &blk->data[0];
1089}
1090
1091/*
1092 * Try to resize an existing data block associated with the descriptor
1093 * specified by @id. If the resized data block should become wrapped, it
1094 * copies the old data to the new data block. If @size yields a data block
1095 * with the same or a smaller size, the data block is left as is.
1096 *
1097 * Fail if this is not the last allocated data block or if there is not
1098 * enough space or it is not possible to make enough space.
1099 *
1100 * Return a pointer to the beginning of the entire data buffer or NULL on
1101 * failure.
1102 */
1103static char *data_realloc(struct printk_ringbuffer *rb,
1104                          struct prb_data_ring *data_ring, unsigned int size,
1105                          struct prb_data_blk_lpos *blk_lpos, unsigned long id)
1106{
1107        struct prb_data_block *blk;
1108        unsigned long head_lpos;
1109        unsigned long next_lpos;
1110        bool wrapped;
1111
1112        /* Reallocation only works if @blk_lpos is the newest data block. */
1113        head_lpos = atomic_long_read(&data_ring->head_lpos);
1114        if (head_lpos != blk_lpos->next)
1115                return NULL;
1116
1117        /* Keep track if @blk_lpos was a wrapping data block. */
1118        wrapped = (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, blk_lpos->next));
1119
1120        size = to_blk_size(size);
1121
1122        next_lpos = get_next_lpos(data_ring, blk_lpos->begin, size);
1123
1124        /* If the data block does not increase, there is nothing to do. */
1125        if (head_lpos - next_lpos < DATA_SIZE(data_ring)) {
1126                if (wrapped)
1127                        blk = to_block(data_ring, 0);
1128                else
1129                        blk = to_block(data_ring, blk_lpos->begin);
1130                return &blk->data[0];
1131        }
1132
1133        if (!data_push_tail(rb, data_ring, next_lpos - DATA_SIZE(data_ring)))
1134                return NULL;
1135
1136        /* The memory barrier involvement is the same as data_alloc:A. */
1137        if (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &head_lpos,
1138                                     next_lpos)) { /* LMM(data_realloc:A) */
1139                return NULL;
1140        }
1141
1142        blk = to_block(data_ring, blk_lpos->begin);
1143
1144        if (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, next_lpos)) {
1145                struct prb_data_block *old_blk = blk;
1146
1147                /* Wrapping data blocks store their data at the beginning. */
1148                blk = to_block(data_ring, 0);
1149
1150                /*
1151                 * Store the ID on the wrapped block for consistency.
1152                 * The printk_ringbuffer does not actually use it.
1153                 */
1154                blk->id = id;
1155
1156                if (!wrapped) {
1157                        /*
1158                         * Since the allocated space is now in the newly
1159                         * created wrapping data block, copy the content
1160                         * from the old data block.
1161                         */
1162                        memcpy(&blk->data[0], &old_blk->data[0],
1163                               (blk_lpos->next - blk_lpos->begin) - sizeof(blk->id));
1164                }
1165        }
1166
1167        blk_lpos->next = next_lpos;
1168
1169        return &blk->data[0];
1170}
1171
1172/* Return the number of bytes used by a data block. */
1173static unsigned int space_used(struct prb_data_ring *data_ring,
1174                               struct prb_data_blk_lpos *blk_lpos)
1175{
1176        /* Data-less blocks take no space. */
1177        if (BLK_DATALESS(blk_lpos))
1178                return 0;
1179
1180        if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next)) {
1181                /* Data block does not wrap. */
1182                return (DATA_INDEX(data_ring, blk_lpos->next) -
1183                        DATA_INDEX(data_ring, blk_lpos->begin));
1184        }
1185
1186        /*
1187         * For wrapping data blocks, the trailing (wasted) space is
1188         * also counted.
1189         */
1190        return (DATA_INDEX(data_ring, blk_lpos->next) +
1191                DATA_SIZE(data_ring) - DATA_INDEX(data_ring, blk_lpos->begin));
1192}
1193
1194/*
1195 * Given @blk_lpos, return a pointer to the writer data from the data block
1196 * and calculate the size of the data part. A NULL pointer is returned if
1197 * @blk_lpos specifies values that could never be legal.
1198 *
1199 * This function (used by readers) performs strict validation on the lpos
1200 * values to possibly detect bugs in the writer code. A WARN_ON_ONCE() is
1201 * triggered if an internal error is detected.
1202 */
1203static const char *get_data(struct prb_data_ring *data_ring,
1204                            struct prb_data_blk_lpos *blk_lpos,
1205                            unsigned int *data_size)
1206{
1207        struct prb_data_block *db;
1208
1209        /* Data-less data block description. */
1210        if (BLK_DATALESS(blk_lpos)) {
1211                if (blk_lpos->begin == NO_LPOS && blk_lpos->next == NO_LPOS) {
1212                        *data_size = 0;
1213                        return "";
1214                }
1215                return NULL;
1216        }
1217
1218        /* Regular data block: @begin less than @next and in same wrap. */
1219        if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next) &&
1220            blk_lpos->begin < blk_lpos->next) {
1221                db = to_block(data_ring, blk_lpos->begin);
1222                *data_size = blk_lpos->next - blk_lpos->begin;
1223
1224        /* Wrapping data block: @begin is one wrap behind @next. */
1225        } else if (DATA_WRAPS(data_ring, blk_lpos->begin + DATA_SIZE(data_ring)) ==
1226                   DATA_WRAPS(data_ring, blk_lpos->next)) {
1227                db = to_block(data_ring, 0);
1228                *data_size = DATA_INDEX(data_ring, blk_lpos->next);
1229
1230        /* Illegal block description. */
1231        } else {
1232                WARN_ON_ONCE(1);
1233                return NULL;
1234        }
1235
1236        /* A valid data block will always be aligned to the ID size. */
1237        if (WARN_ON_ONCE(blk_lpos->begin != ALIGN(blk_lpos->begin, sizeof(db->id))) ||
1238            WARN_ON_ONCE(blk_lpos->next != ALIGN(blk_lpos->next, sizeof(db->id)))) {
1239                return NULL;
1240        }
1241
1242        /* A valid data block will always have at least an ID. */
1243        if (WARN_ON_ONCE(*data_size < sizeof(db->id)))
1244                return NULL;
1245
1246        /* Subtract block ID space from size to reflect data size. */
1247        *data_size -= sizeof(db->id);
1248
1249        return &db->data[0];
1250}
1251
1252/*
1253 * Attempt to transition the newest descriptor from committed back to reserved
1254 * so that the record can be modified by a writer again. This is only possible
1255 * if the descriptor is not yet finalized and the provided @caller_id matches.
1256 */
1257static struct prb_desc *desc_reopen_last(struct prb_desc_ring *desc_ring,
1258                                         u32 caller_id, unsigned long *id_out)
1259{
1260        unsigned long prev_state_val;
1261        enum desc_state d_state;
1262        struct prb_desc desc;
1263        struct prb_desc *d;
1264        unsigned long id;
1265        u32 cid;
1266
1267        id = atomic_long_read(&desc_ring->head_id);
1268
1269        /*
1270         * To reduce unnecessary reopening, first check if the descriptor
1271         * state and caller ID are correct.
1272         */
1273        d_state = desc_read(desc_ring, id, &desc, NULL, &cid);
1274        if (d_state != desc_committed || cid != caller_id)
1275                return NULL;
1276
1277        d = to_desc(desc_ring, id);
1278
1279        prev_state_val = DESC_SV(id, desc_committed);
1280
1281        /*
1282         * Guarantee the reserved state is stored before reading any
1283         * record data. A full memory barrier is needed because @state_var
1284         * modification is followed by reading. This pairs with _prb_commit:B.
1285         *
1286         * Memory barrier involvement:
1287         *
1288         * If desc_reopen_last:A reads from _prb_commit:B, then
1289         * prb_reserve_in_last:A reads from _prb_commit:A.
1290         *
1291         * Relies on:
1292         *
1293         * WMB from _prb_commit:A to _prb_commit:B
1294         *    matching
1295         * MB from desc_reopen_last:A to prb_reserve_in_last:A
1296         */
1297        if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val,
1298                        DESC_SV(id, desc_reserved))) { /* LMM(desc_reopen_last:A) */
1299                return NULL;
1300        }
1301
1302        *id_out = id;
1303        return d;
1304}
1305
1306/**
1307 * prb_reserve_in_last() - Re-reserve and extend the space in the ringbuffer
1308 *                         used by the newest record.
1309 *
1310 * @e:         The entry structure to setup.
1311 * @rb:        The ringbuffer to re-reserve and extend data in.
1312 * @r:         The record structure to allocate buffers for.
1313 * @caller_id: The caller ID of the caller (reserving writer).
1314 * @max_size:  Fail if the extended size would be greater than this.
1315 *
1316 * This is the public function available to writers to re-reserve and extend
1317 * data.
1318 *
1319 * The writer specifies the text size to extend (not the new total size) by
1320 * setting the @text_buf_size field of @r. To ensure proper initialization
1321 * of @r, prb_rec_init_wr() should be used.
1322 *
1323 * This function will fail if @caller_id does not match the caller ID of the
1324 * newest record. In that case the caller must reserve new data using
1325 * prb_reserve().
1326 *
1327 * Context: Any context. Disables local interrupts on success.
1328 * Return: true if text data could be extended, otherwise false.
1329 *
1330 * On success:
1331 *
1332 *   - @r->text_buf points to the beginning of the entire text buffer.
1333 *
1334 *   - @r->text_buf_size is set to the new total size of the buffer.
1335 *
1336 *   - @r->info is not touched so that @r->info->text_len can be used
1337 *     to append the text.
1338 *
1339 *   - prb_record_text_space() can be used on @e to query the new
1340 *     actually used space.
1341 *
1342 * Important: All @r->info fields will already be set with the current values
1343 *            for the record. I.e. @r->info->text_len will be less than
1344 *            @text_buf_size. Writers can use @r->info->text_len to know
1345 *            where concatenation begins and writers should update
1346 *            @r->info->text_len after concatenating.
1347 */
1348bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
1349                         struct printk_record *r, u32 caller_id, unsigned int max_size)
1350{
1351        struct prb_desc_ring *desc_ring = &rb->desc_ring;
1352        struct printk_info *info;
1353        unsigned int data_size;
1354        struct prb_desc *d;
1355        unsigned long id;
1356
1357        local_irq_save(e->irqflags);
1358
1359        /* Transition the newest descriptor back to the reserved state. */
1360        d = desc_reopen_last(desc_ring, caller_id, &id);
1361        if (!d) {
1362                local_irq_restore(e->irqflags);
1363                goto fail_reopen;
1364        }
1365
1366        /* Now the writer has exclusive access: LMM(prb_reserve_in_last:A) */
1367
1368        info = to_info(desc_ring, id);
1369
1370        /*
1371         * Set the @e fields here so that prb_commit() can be used if
1372         * anything fails from now on.
1373         */
1374        e->rb = rb;
1375        e->id = id;
1376
1377        /*
1378         * desc_reopen_last() checked the caller_id, but there was no
1379         * exclusive access at that point. The descriptor may have
1380         * changed since then.
1381         */
1382        if (caller_id != info->caller_id)
1383                goto fail;
1384
1385        if (BLK_DATALESS(&d->text_blk_lpos)) {
1386                if (WARN_ON_ONCE(info->text_len != 0)) {
1387                        pr_warn_once("wrong text_len value (%hu, expecting 0)\n",
1388                                     info->text_len);
1389                        info->text_len = 0;
1390                }
1391
1392                if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
1393                        goto fail;
1394
1395                if (r->text_buf_size > max_size)
1396                        goto fail;
1397
1398                r->text_buf = data_alloc(rb, &rb->text_data_ring, r->text_buf_size,
1399                                         &d->text_blk_lpos, id);
1400        } else {
1401                if (!get_data(&rb->text_data_ring, &d->text_blk_lpos, &data_size))
1402                        goto fail;
1403
1404                /*
1405                 * Increase the buffer size to include the original size. If
1406                 * the meta data (@text_len) is not sane, use the full data
1407                 * block size.
1408                 */
1409                if (WARN_ON_ONCE(info->text_len > data_size)) {
1410                        pr_warn_once("wrong text_len value (%hu, expecting <=%u)\n",
1411                                     info->text_len, data_size);
1412                        info->text_len = data_size;
1413                }
1414                r->text_buf_size += info->text_len;
1415
1416                if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
1417                        goto fail;
1418
1419                if (r->text_buf_size > max_size)
1420                        goto fail;
1421
1422                r->text_buf = data_realloc(rb, &rb->text_data_ring, r->text_buf_size,
1423                                           &d->text_blk_lpos, id);
1424        }
1425        if (r->text_buf_size && !r->text_buf)
1426                goto fail;
1427
1428        r->info = info;
1429
1430        e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos);
1431
1432        return true;
1433fail:
1434        prb_commit(e);
1435        /* prb_commit() re-enabled interrupts. */
1436fail_reopen:
1437        /* Make it clear to the caller that the re-reserve failed. */
1438        memset(r, 0, sizeof(*r));
1439        return false;
1440}
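
/*
 * A minimal usage sketch (illustrative only; example_append_text(), its
 * parameters and the 1024-byte cap are hypothetical, and @text_len is
 * assumed to be non-zero): append text to the newest record if it belongs
 * to @caller_id. If this fails, the caller would reserve a new record
 * with prb_reserve() instead.
 */
static bool example_append_text(struct printk_ringbuffer *rb, u32 caller_id,
                                const char *text, u16 text_len)
{
        struct prb_reserved_entry e;
        struct printk_record r;

        /* @text_buf_size is the size to add, not the new total size. */
        prb_rec_init_wr(&r, text_len);

        if (!prb_reserve_in_last(&e, rb, &r, caller_id, 1024))
                return false;

        /* @r.info->text_len marks where the existing text ends. */
        memcpy(&r.text_buf[r.info->text_len], text, text_len);
        r.info->text_len += text_len;

        prb_commit(&e);

        return true;
}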
1441
1442/*
1443 * Attempt to finalize a specified descriptor. If this fails, the descriptor
1444 * is either already final or it will finalize itself when the writer commits.
1445 */
1446static void desc_make_final(struct prb_desc_ring *desc_ring, unsigned long id)
1447{
1448        unsigned long prev_state_val = DESC_SV(id, desc_committed);
1449        struct prb_desc *d = to_desc(desc_ring, id);
1450
1451        atomic_long_cmpxchg_relaxed(&d->state_var, prev_state_val,
1452                        DESC_SV(id, desc_finalized)); /* LMM(desc_make_final:A) */
1453}
1454
1455/**
1456 * prb_reserve() - Reserve space in the ringbuffer.
1457 *
1458 * @e:  The entry structure to setup.
1459 * @rb: The ringbuffer to reserve data in.
1460 * @r:  The record structure to allocate buffers for.
1461 *
1462 * This is the public function available to writers to reserve data.
1463 *
1464 * The writer specifies the text size to reserve by setting the
1465 * @text_buf_size field of @r. To ensure proper initialization of @r,
1466 * prb_rec_init_wr() should be used.
1467 *
1468 * Context: Any context. Disables local interrupts on success.
1469 * Return: true if at least text data could be allocated, otherwise false.
1470 *
1471 * On success, the fields @info and @text_buf of @r will be set by this
1472 * function and should be filled in by the writer before committing. Also
1473 * on success, prb_record_text_space() can be used on @e to query the actual
1474 * space used for the text data block.
1475 *
1476 * Important: @info->text_len needs to be set correctly by the writer in
1477 *            order for data to be readable and/or extended. Its value
1478 *            is initialized to 0.
1479 */
1480bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
1481                 struct printk_record *r)
1482{
1483        struct prb_desc_ring *desc_ring = &rb->desc_ring;
1484        struct printk_info *info;
1485        struct prb_desc *d;
1486        unsigned long id;
1487        u64 seq;
1488
1489        if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
1490                goto fail;
1491
1492        /*
1493         * Descriptors in the reserved state act as blockers to all further
1494         * reservations once the desc_ring has fully wrapped. Disable
1495         * interrupts during the reserve/commit window in order to minimize
1496         * the likelihood of this happening.
1497         */
1498        local_irq_save(e->irqflags);
1499
1500        if (!desc_reserve(rb, &id)) {
1501                /* Descriptor reservation failures are tracked. */
1502                atomic_long_inc(&rb->fail);
1503                local_irq_restore(e->irqflags);
1504                goto fail;
1505        }
1506
1507        d = to_desc(desc_ring, id);
1508        info = to_info(desc_ring, id);
1509
1510        /*
1511         * All @info fields (except @seq) are cleared and must be filled in
1512         * by the writer. Save @seq before clearing because it is used to
1513         * determine the new sequence number.
1514         */
1515        seq = info->seq;
1516        memset(info, 0, sizeof(*info));
1517
1518        /*
1519         * Set the @e fields here so that prb_commit() can be used if
1520         * text data allocation fails.
1521         */
1522        e->rb = rb;
1523        e->id = id;
1524
1525        /*
1526         * Initialize the sequence number if it has "never been set".
1527         * Otherwise just increment it by a full wrap.
1528         *
1529         * @seq is considered "never been set" if it has a value of 0,
1530         * _except_ for @infos[0], which was specially setup by the ringbuffer
1531         * initializer and therefore is always considered as set.
1532         *
1533         * See the "Bootstrap" comment block in printk_ringbuffer.h for
1534         * details about how the initializer bootstraps the descriptors.
1535         */
1536        if (seq == 0 && DESC_INDEX(desc_ring, id) != 0)
1537                info->seq = DESC_INDEX(desc_ring, id);
1538        else
1539                info->seq = seq + DESCS_COUNT(desc_ring);
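
        /*
         * Worked example (illustrative only, assuming 8 descriptors):
         * descriptor index 3 is reserved for the first time with a saved
         * @seq of 0, so it gets seq 3; one full wrap later its saved @seq
         * is 3 and it gets 3 + 8 = 11. Descriptor index 0 was bootstrapped
         * with @seq == -8, so the first real record gets seq -8 + 8 = 0.
         */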
1540
1541        /*
1542         * New data is about to be reserved. Once that happens, previous
1543         * descriptors are no longer able to be extended. Finalize the
1544         * previous descriptor now so that it can be made available to
1545         * readers. (For seq==0 there is no previous descriptor.)
1546         */
1547        if (info->seq > 0)
1548                desc_make_final(desc_ring, DESC_ID(id - 1));
1549
1550        r->text_buf = data_alloc(rb, &rb->text_data_ring, r->text_buf_size,
1551                                 &d->text_blk_lpos, id);
1552        /* If text data allocation fails, a data-less record is committed. */
1553        if (r->text_buf_size && !r->text_buf) {
1554                prb_commit(e);
1555                /* prb_commit() re-enabled interrupts. */
1556                goto fail;
1557        }
1558
1559        r->info = info;
1560
1561        /* Record full text space used by record. */
1562        e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos);
1563
1564        return true;
1565fail:
1566        /* Make it clear to the caller that the reserve failed. */
1567        memset(r, 0, sizeof(*r));
1568        return false;
1569}
1570
1571/* Commit the data (possibly finalizing it) and restore interrupts. */
1572static void _prb_commit(struct prb_reserved_entry *e, unsigned long state_val)
1573{
1574        struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
1575        struct prb_desc *d = to_desc(desc_ring, e->id);
1576        unsigned long prev_state_val = DESC_SV(e->id, desc_reserved);
1577
1578        /* Now the writer has finished all writing: LMM(_prb_commit:A) */
1579
1580        /*
1581         * Set the descriptor as committed. See "ABA Issues" about why
1582         * cmpxchg() instead of set() is used.
1583         *
1584         * 1. Guarantee all record data is stored before the descriptor state
1585         *    is stored as committed. A write memory barrier is sufficient
1586         *    for this. This pairs with desc_read:B and desc_reopen_last:A.
1587         *
1588         * 2. Guarantee the descriptor state is stored as committed before
1589         *    re-checking the head ID in order to possibly finalize this
1590         *    descriptor. This pairs with desc_reserve:D.
1591         *
1592         *    Memory barrier involvement:
1593         *
1594         *    If prb_commit:A reads from desc_reserve:D, then
1595         *    desc_make_final:A reads from _prb_commit:B.
1596         *
1597         *    Relies on:
1598         *
1599         *    MB _prb_commit:B to prb_commit:A
1600         *       matching
1601         *    MB desc_reserve:D to desc_make_final:A
1602         */
1603        if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val,
1604                        DESC_SV(e->id, state_val))) { /* LMM(_prb_commit:B) */
1605                WARN_ON_ONCE(1);
1606        }
1607
1608        /* Restore interrupts, the reserve/commit window is finished. */
1609        local_irq_restore(e->irqflags);
1610}
1611
1612/**
1613 * prb_commit() - Commit (previously reserved) data to the ringbuffer.
1614 *
1615 * @e: The entry containing the reserved data information.
1616 *
1617 * This is the public function available to writers to commit data.
1618 *
1619 * Note that the data is not yet available to readers until it is finalized.
1620 * Finalizing happens automatically when space for the next record is
1621 * reserved.
1622 *
1623 * See prb_final_commit() for a version of this function that finalizes
1624 * immediately.
1625 *
1626 * Context: Any context. Enables local interrupts.
1627 */
1628void prb_commit(struct prb_reserved_entry *e)
1629{
1630        struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
1631        unsigned long head_id;
1632
1633        _prb_commit(e, desc_committed);
1634
1635        /*
1636         * If this descriptor is no longer the head (i.e. a new record has
1637         * been allocated), extending the data for this record is no longer
1638         * allowed and therefore it must be finalized.
1639         */
1640        head_id = atomic_long_read(&desc_ring->head_id); /* LMM(prb_commit:A) */
1641        if (head_id != e->id)
1642                desc_make_final(desc_ring, e->id);
1643}
1644
1645/**
1646 * prb_final_commit() - Commit and finalize (previously reserved) data to
1647 *                      the ringbuffer.
1648 *
1649 * @e: The entry containing the reserved data information.
1650 *
1651 * This is the public function available to writers to commit+finalize data.
1652 *
1653 * By finalizing, the data is made immediately available to readers.
1654 *
1655 * This function should only be used if there are no intentions of extending
1656 * this data using prb_reserve_in_last().
1657 *
1658 * Context: Any context. Enables local interrupts.
1659 */
1660void prb_final_commit(struct prb_reserved_entry *e)
1661{
1662        _prb_commit(e, desc_finalized);
1663}
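
/*
 * A minimal writer sketch (illustrative only; example_write_record() and
 * its parameters are hypothetical, and @text_len is assumed to be
 * non-zero): reserve space, copy the text in, record its length and
 * finalize so that readers can see the record immediately.
 */
static bool example_write_record(struct printk_ringbuffer *rb,
                                 const char *text, u16 text_len)
{
        struct prb_reserved_entry e;
        struct printk_record r;

        prb_rec_init_wr(&r, text_len);

        /* On failure a data-less record may already have been committed. */
        if (!prb_reserve(&e, rb, &r))
                return false;

        memcpy(r.text_buf, text, text_len);
        r.info->text_len = text_len;

        /* Use prb_commit() instead if the record might be extended later. */
        prb_final_commit(&e);

        return true;
}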
1664
1665/*
1666 * Count the number of lines in the provided text. All text has at least 1 line
1667 * (even if @text_size is 0). Each '\n' processed is counted as an additional
1668 * line.
1669 */
1670static unsigned int count_lines(const char *text, unsigned int text_size)
1671{
1672        unsigned int next_size = text_size;
1673        unsigned int line_count = 1;
1674        const char *next = text;
1675
1676        while (next_size) {
1677                next = memchr(next, '\n', next_size);
1678                if (!next)
1679                        break;
1680                line_count++;
1681                next++;
1682                next_size = text_size - (next - text);
1683        }
1684
1685        return line_count;
1686}
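
/*
 * For example (illustrative only): count_lines("foo\nbar\n", 8) returns 3
 * because the trailing '\n' begins a new (empty) line, and
 * count_lines(text, 0) always returns 1.
 */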
1687
1688/*
1689 * Given @blk_lpos, copy an expected @len of data into the provided buffer.
1690 * If @line_count is provided, count the number of lines in the data.
1691 *
1692 * This function (used by readers) performs strict validation on the data
1693 * size to possibly detect bugs in the writer code. A WARN_ON_ONCE() is
1694 * triggered if an internal error is detected.
1695 */
1696static bool copy_data(struct prb_data_ring *data_ring,
1697                      struct prb_data_blk_lpos *blk_lpos, u16 len, char *buf,
1698                      unsigned int buf_size, unsigned int *line_count)
1699{
1700        unsigned int data_size;
1701        const char *data;
1702
1703        /* Caller might not want any data. */
1704        if ((!buf || !buf_size) && !line_count)
1705                return true;
1706
1707        data = get_data(data_ring, blk_lpos, &data_size);
1708        if (!data)
1709                return false;
1710
1711        /*
1712         * The actual size cannot be less than the expected size. It can be
1713         * more because of the trailing alignment padding.
1714         *
1715         * Note that invalid @len values can occur because the caller loads
1716         * the value during an allowed data race.
1717         */
1718        if (data_size < (unsigned int)len)
1719                return false;
1720
1721        /* Caller interested in the line count? */
1722        if (line_count)
1723                *line_count = count_lines(data, data_size);
1724
1725        /* Caller interested in the data content? */
1726        if (!buf || !buf_size)
1727                return true;
1728
1729        data_size = min_t(unsigned int, buf_size, len);
1730
1731        memcpy(&buf[0], data, data_size); /* LMM(copy_data:A) */
1732        return true;
1733}
1734
1735/*
1736 * This is an extended version of desc_read(). It gets a copy of a specified
1737 * descriptor. However, it also verifies that the record is finalized and has
1738 * the sequence number @seq. On success, 0 is returned.
1739 *
1740 * Error return values:
1741 * -EINVAL: A finalized record with sequence number @seq does not exist.
1742 * -ENOENT: A finalized record with sequence number @seq exists, but its data
1743 *          is not available. This is a valid record, so readers should
1744 *          continue with the next record.
1745 */
1746static int desc_read_finalized_seq(struct prb_desc_ring *desc_ring,
1747                                   unsigned long id, u64 seq,
1748                                   struct prb_desc *desc_out)
1749{
1750        struct prb_data_blk_lpos *blk_lpos = &desc_out->text_blk_lpos;
1751        enum desc_state d_state;
1752        u64 s;
1753
1754        d_state = desc_read(desc_ring, id, desc_out, &s, NULL);
1755
1756        /*
1757         * An unexpected @id (desc_miss) or @seq mismatch means the record
1758         * does not exist. A descriptor in the reserved or committed state
1759         * means the record does not yet exist for the reader.
1760         */
1761        if (d_state == desc_miss ||
1762            d_state == desc_reserved ||
1763            d_state == desc_committed ||
1764            s != seq) {
1765                return -EINVAL;
1766        }
1767
1768        /*
1769         * A descriptor in the reusable state may no longer have its data
1770         * available; report it as existing but with lost data. The record may
1771         * also be one whose text data was never successfully stored.
1772         */
1773        if (d_state == desc_reusable ||
1774            (blk_lpos->begin == FAILED_LPOS && blk_lpos->next == FAILED_LPOS)) {
1775                return -ENOENT;
1776        }
1777
1778        return 0;
1779}
1780
1781/*
1782 * Copy the ringbuffer data from the record with @seq to the provided
1783 * @r buffer. On success, 0 is returned.
1784 *
1785 * See desc_read_finalized_seq() for error return values.
1786 */
1787static int prb_read(struct printk_ringbuffer *rb, u64 seq,
1788                    struct printk_record *r, unsigned int *line_count)
1789{
1790        struct prb_desc_ring *desc_ring = &rb->desc_ring;
1791        struct printk_info *info = to_info(desc_ring, seq);
1792        struct prb_desc *rdesc = to_desc(desc_ring, seq);
1793        atomic_long_t *state_var = &rdesc->state_var;
1794        struct prb_desc desc;
1795        unsigned long id;
1796        int err;
1797
1798        /* Extract the ID, used to specify the descriptor to read. */
1799        id = DESC_ID(atomic_long_read(state_var));
1800
1801        /* Get a local copy of the correct descriptor (if available). */
1802        err = desc_read_finalized_seq(desc_ring, id, seq, &desc);
1803
1804        /*
1805         * If @r is NULL, the caller is only interested in the availability
1806         * of the record.
1807         */
1808        if (err || !r)
1809                return err;
1810
1811        /* If requested, copy meta data. */
1812        if (r->info)
1813                memcpy(r->info, info, sizeof(*(r->info)));
1814
1815        /* Copy text data. If it fails, this is a data-less record. */
1816        if (!copy_data(&rb->text_data_ring, &desc.text_blk_lpos, info->text_len,
1817                       r->text_buf, r->text_buf_size, line_count)) {
1818                return -ENOENT;
1819        }
1820
1821        /* Ensure the record is still finalized and has the same @seq. */
1822        return desc_read_finalized_seq(desc_ring, id, seq, &desc);
1823}
1824
1825/* Get the sequence number of the tail descriptor. */
1826static u64 prb_first_seq(struct printk_ringbuffer *rb)
1827{
1828        struct prb_desc_ring *desc_ring = &rb->desc_ring;
1829        enum desc_state d_state;
1830        struct prb_desc desc;
1831        unsigned long id;
1832        u64 seq;
1833
1834        for (;;) {
1835                id = atomic_long_read(&rb->desc_ring.tail_id); /* LMM(prb_first_seq:A) */
1836
1837                d_state = desc_read(desc_ring, id, &desc, &seq, NULL); /* LMM(prb_first_seq:B) */
1838
1839                /*
1840                 * This loop will not be infinite because the tail is
1841                 * _always_ in the finalized or reusable state.
1842                 */
1843                if (d_state == desc_finalized || d_state == desc_reusable)
1844                        break;
1845
1846                /*
1847                 * Guarantee the last state load from desc_read() is before
1848                 * reloading @tail_id in order to see a new tail in the case
1849                 * that the descriptor has been recycled. This pairs with
1850                 * desc_reserve:D.
1851                 *
1852                 * Memory barrier involvement:
1853                 *
1854                 * If prb_first_seq:B reads from desc_reserve:F, then
1855                 * prb_first_seq:A reads from desc_push_tail:B.
1856                 *
1857                 * Relies on:
1858                 *
1859                 * MB from desc_push_tail:B to desc_reserve:F
1860                 *    matching
1861                 * RMB prb_first_seq:B to prb_first_seq:A
1862                 */
1863                smp_rmb(); /* LMM(prb_first_seq:C) */
1864        }
1865
1866        return seq;
1867}
1868
1869/*
1870 * Non-blocking read of a record. Updates @seq to the last finalized record
1871 * (which may have no data available).
1872 *
1873 * See the description of prb_read_valid() and prb_read_valid_info()
1874 * for details.
1875 */
1876static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
1877                            struct printk_record *r, unsigned int *line_count)
1878{
1879        u64 tail_seq;
1880        int err;
1881
1882        while ((err = prb_read(rb, *seq, r, line_count))) {
1883                tail_seq = prb_first_seq(rb);
1884
1885                if (*seq < tail_seq) {
1886                        /*
1887                         * Behind the tail. Catch up and try again. This
1888                         * can happen for -ENOENT and -EINVAL cases.
1889                         */
1890                        *seq = tail_seq;
1891
1892                } else if (err == -ENOENT) {
1893                        /* Record exists, but no data available. Skip. */
1894                        (*seq)++;
1895
1896                } else {
1897                        /* Non-existent/non-finalized record. Must stop. */
1898                        return false;
1899                }
1900        }
1901
1902        return true;
1903}
1904
1905/**
1906 * prb_read_valid() - Non-blocking read of a requested record or (if gone)
1907 *                    the next available record.
1908 *
1909 * @rb:  The ringbuffer to read from.
1910 * @seq: The sequence number of the record to read.
1911 * @r:   A record data buffer to store the read record to.
1912 *
1913 * This is the public function available to readers to read a record.
1914 *
1915 * The reader provides the @info and @text_buf buffers of @r to be
1916 * filled in. Any of the buffer pointers can be set to NULL if the reader
1917 * is not interested in that data. To ensure proper initialization of @r,
1918 * prb_rec_init_rd() should be used.
1919 *
1920 * Context: Any context.
1921 * Return: true if a record was read, otherwise false.
1922 *
1923 * On success, the reader must check r->info->seq to see which record was
1924 * actually read. This allows the reader to detect dropped records.
1925 *
1926 * Failure means @seq refers to a not yet written record.
1927 */
1928bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
1929                    struct printk_record *r)
1930{
1931        return _prb_read_valid(rb, &seq, r, NULL);
1932}
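
/*
 * A minimal reader sketch (illustrative only; example_read_all() and the
 * buffer size are hypothetical): iterate over all currently available
 * records starting with the oldest one, detecting dropped records by
 * comparing the requested and returned sequence numbers.
 */
static void example_read_all(struct printk_ringbuffer *rb)
{
        struct printk_info info;
        struct printk_record r;
        char text[256];
        u64 seq = 0;

        prb_rec_init_rd(&r, &info, &text[0], sizeof(text));

        while (prb_read_valid(rb, seq, &r)) {
                if (info.seq != seq) {
                        /* (info.seq - seq) records were missed. */
                }

                /*
                 * The text is not terminated. At most
                 * min(info.text_len, sizeof(text)) bytes were copied.
                 */

                seq = info.seq + 1;
        }
}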
1933
1934/**
1935 * prb_read_valid_info() - Non-blocking read of meta data for a requested
1936 *                         record or (if gone) the next available record.
1937 *
1938 * @rb:         The ringbuffer to read from.
1939 * @seq:        The sequence number of the record to read.
1940 * @info:       A buffer to store the read record meta data to.
1941 * @line_count: A buffer to store the number of lines in the record text.
1942 *
1943 * This is the public function available to readers to read only the
1944 * meta data of a record.
1945 *
1946 * The reader provides the @info and @line_count buffers to be filled in.
1947 * Either of the buffer pointers can be set to NULL if the reader is not
1948 * interested in that data.
1949 *
1950 * Context: Any context.
1951 * Return: true if a record's meta data was read, otherwise false.
1952 *
1953 * On success, the reader must check info->seq to see which record meta data
1954 * was actually read. This allows the reader to detect dropped records.
1955 *
1956 * Failure means @seq refers to a not yet written record.
1957 */
1958bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
1959                         struct printk_info *info, unsigned int *line_count)
1960{
1961        struct printk_record r;
1962
1963        prb_rec_init_rd(&r, info, NULL, 0);
1964
1965        return _prb_read_valid(rb, &seq, &r, line_count);
1966}
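
/*
 * A minimal sketch (illustrative only; example_query_lines() is
 * hypothetical): query only the meta data and line count of a record
 * without copying any of its text.
 */
static bool example_query_lines(struct printk_ringbuffer *rb, u64 seq,
                                unsigned int *lines)
{
        struct printk_info info;

        if (!prb_read_valid_info(rb, seq, &info, lines))
                return false;

        /* info.seq may differ from @seq if that record is already gone. */
        return info.seq == seq;
}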
1967
1968/**
1969 * prb_first_valid_seq() - Get the sequence number of the oldest available
1970 *                         record.
1971 *
1972 * @rb: The ringbuffer to get the sequence number from.
1973 *
1974 * This is the public function available to readers to see what the
1975 * first/oldest valid sequence number is.
1976 *
1977 * This provides readers a starting point to begin iterating the ringbuffer.
1978 *
1979 * Context: Any context.
1980 * Return: The sequence number of the first/oldest record, or 0 if the
1981 *         ringbuffer is empty.
1982 */
1983u64 prb_first_valid_seq(struct printk_ringbuffer *rb)
1984{
1985        u64 seq = 0;
1986
1987        if (!_prb_read_valid(rb, &seq, NULL, NULL))
1988                return 0;
1989
1990        return seq;
1991}
1992
1993/**
1994 * prb_next_seq() - Get the sequence number after the last available record.
1995 *
1996 * @rb:  The ringbuffer to get the sequence number from.
1997 *
1998 * This is the public function available to readers to see what the next
1999 * newest (not yet available) sequence number will be.
2000 *
2001 * This provides readers a sequence number to jump to if all currently
2002 * available records should be skipped.
2003 *
2004 * Context: Any context.
2005 * Return: The sequence number of the next newest (not yet available) record
2006 *         for readers.
2007 */
2008u64 prb_next_seq(struct printk_ringbuffer *rb)
2009{
2010        u64 seq = 0;
2011
2012        /* Search forward from the oldest descriptor. */
2013        while (_prb_read_valid(rb, &seq, NULL, NULL))
2014                seq++;
2015
2016        return seq;
2017}
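
/*
 * A minimal sketch (illustrative only; example_seq_range() is
 * hypothetical): report the range of sequence numbers a reader could
 * currently iterate. Some records in this range may be data-less and are
 * skipped when actually reading.
 */
static void example_seq_range(struct printk_ringbuffer *rb,
                              u64 *first, u64 *next)
{
        *first = prb_first_valid_seq(rb);
        *next = prb_next_seq(rb);
}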
2018
2019/**
2020 * prb_init() - Initialize a ringbuffer to use provided external buffers.
2021 *
2022 * @rb:       The ringbuffer to initialize.
2023 * @text_buf: The data buffer for text data.
2024 * @textbits: The size of @text_buf as a power-of-2 value.
2025 * @descs:    The descriptor buffer for ringbuffer records.
2026 * @descbits: The count of @descs items as a power-of-2 value.
2027 * @infos:    The printk_info buffer for ringbuffer records.
2028 *
2029 * This is the public function available to writers to setup a ringbuffer
2030 * during runtime using provided buffers.
2031 *
2032 * This must match the initialization of DEFINE_PRINTKRB().
2033 *
2034 * Context: Any context.
2035 */
2036void prb_init(struct printk_ringbuffer *rb,
2037              char *text_buf, unsigned int textbits,
2038              struct prb_desc *descs, unsigned int descbits,
2039              struct printk_info *infos)
2040{
2041        memset(descs, 0, _DESCS_COUNT(descbits) * sizeof(descs[0]));
2042        memset(infos, 0, _DESCS_COUNT(descbits) * sizeof(infos[0]));
2043
2044        rb->desc_ring.count_bits = descbits;
2045        rb->desc_ring.descs = descs;
2046        rb->desc_ring.infos = infos;
2047        atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits));
2048        atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits));
2049
2050        rb->text_data_ring.size_bits = textbits;
2051        rb->text_data_ring.data = text_buf;
2052        atomic_long_set(&rb->text_data_ring.head_lpos, BLK0_LPOS(textbits));
2053        atomic_long_set(&rb->text_data_ring.tail_lpos, BLK0_LPOS(textbits));
2054
2055        atomic_long_set(&rb->fail, 0);
2056
2057        atomic_long_set(&(descs[_DESCS_COUNT(descbits) - 1].state_var), DESC0_SV(descbits));
2058        descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.begin = FAILED_LPOS;
2059        descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.next = FAILED_LPOS;
2060
2061        infos[0].seq = -(u64)_DESCS_COUNT(descbits);
2062        infos[_DESCS_COUNT(descbits) - 1].seq = 0;
2063}
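
/*
 * A minimal setup sketch (illustrative only; the sizes and names are
 * hypothetical): a 4 KiB text buffer with 32 descriptors and infos. For
 * static definitions, DEFINE_PRINTKRB() performs the equivalent setup at
 * compile time.
 */
#define EXAMPLE_TEXTBITS 12     /* 1 << 12 == 4096 byte text buffer */
#define EXAMPLE_DESCBITS 5      /* 1 << 5 == 32 descriptors */

static char example_text[1 << EXAMPLE_TEXTBITS];
static struct prb_desc example_descs[1 << EXAMPLE_DESCBITS];
static struct printk_info example_infos[1 << EXAMPLE_DESCBITS];
static struct printk_ringbuffer example_rb;

static void example_setup(void)
{
        prb_init(&example_rb, example_text, EXAMPLE_TEXTBITS,
                 example_descs, EXAMPLE_DESCBITS, example_infos);
}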
2064
2065/**
2066 * prb_record_text_space() - Query the full actual used ringbuffer space for
2067 *                           the text data of a reserved entry.
2068 *
2069 * @e: The successfully reserved entry to query.
2070 *
2071 * This is the public function available to writers to see how much actual
2072 * space is used in the ringbuffer to store the text data of the specified
2073 * entry.
2074 *
2075 * This function is only valid if @e has been successfully reserved using
2076 * prb_reserve().
2077 *
2078 * Context: Any context.
2079 * Return: The size in bytes used by the text data of the associated record.
2080 */
2081unsigned int prb_record_text_space(struct prb_reserved_entry *e)
2082{
2083        return e->text_space;
2084}
2085