linux/drivers/misc/mic/scif/scif_rb.c
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 */
#include <linux/circ_buf.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/errno.h>

#include "scif_rb.h"

#define scif_rb_ring_cnt(head, tail, size) CIRC_CNT(head, tail, size)
#define scif_rb_ring_space(head, tail, size) CIRC_SPACE(head, tail, size)
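
/*
 * Worked example of the circular buffer arithmetic: with a ring of
 * size 64, write offset (head) 10 and read offset (tail) 60, the count
 * wraps: (10 - 60) & 63 = 14 bytes are readable and
 * (60 - 10 - 1) & 63 = 49 bytes are writable. CIRC_SPACE() always
 * keeps one byte free so that a full ring is distinguishable from an
 * empty one: count + space == size - 1.
 */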

/**
 * scif_rb_init - Initializes the ring buffer
 * @rb: ring buffer
 * @read_ptr: A pointer to the read offset
 * @write_ptr: A pointer to the write offset
 * @rb_base: A pointer to the base of the ring buffer
 * @size: The log2 of the ring buffer size in bytes
 */
void scif_rb_init(struct scif_rb *rb, u32 *read_ptr, u32 *write_ptr,
                  void *rb_base, u8 size)
{
        rb->rb_base = rb_base;
        rb->size = (1 << size);
        rb->read_ptr = read_ptr;
        rb->write_ptr = write_ptr;
        rb->current_read_offset = *read_ptr;
        rb->current_write_offset = *write_ptr;
}
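
/*
 * Usage sketch (illustrative only; "qp", "read_off", "write_off" and
 * "ring_base" are hypothetical names, not part of this driver): the
 * caller passes pointers to the shared read/write offsets, the base of
 * the ring buffer, and the size as a log2 value:
 *
 *        struct scif_rb rb;
 *
 *        scif_rb_init(&rb, &qp->read_off, &qp->write_off,
 *                     ring_base, 12);    // 12 -> a 4 KB (1 << 12) ring
 */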

/* Copies a message to the ring buffer -- handles the wrap around case */
static void memcpy_torb(struct scif_rb *rb, void *header,
                        void *msg, u32 size)
{
        u32 size1, size2;

        if (header + size >= rb->rb_base + rb->size) {
                /* Need two copies if the message wraps around */
                size1 = (u32)(rb->rb_base + rb->size - header);
                size2 = size - size1;
                memcpy_toio((void __iomem __force *)header, msg, size1);
                memcpy_toio((void __iomem __force *)rb->rb_base,
                            msg + size1, size2);
        } else {
                memcpy_toio((void __iomem __force *)header, msg, size);
        }
}

/* Copies a message from the ring buffer -- handles the wrap around case */
static void memcpy_fromrb(struct scif_rb *rb, void *header,
                          void *msg, u32 size)
{
        u32 size1, size2;

        if (header + size >= rb->rb_base + rb->size) {
                /* Need two copies if the message wraps around */
                size1 = (u32)(rb->rb_base + rb->size - header);
                size2 = size - size1;
                memcpy_fromio(msg, (void __iomem __force *)header, size1);
                memcpy_fromio(msg + size1,
                              (void __iomem __force *)rb->rb_base, size2);
        } else {
                memcpy_fromio(msg, (void __iomem __force *)header, size);
        }
}
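
/*
 * Wrap-around example: in a 256 byte ring with the message header at
 * offset 252, a 6 byte message is split into size1 = 256 - 252 = 4
 * bytes copied at the end of the ring and size2 = 6 - 4 = 2 bytes
 * copied at rb_base. The ">=" in the test also routes the exact-fit
 * case through the split path, where size2 is 0 and the second copy
 * is a harmless no-op.
 */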

/**
 * scif_rb_space - Query space available for writing to the RB
 * @rb: ring buffer
 *
 * Return: size available for writing to the RB in bytes.
 */
u32 scif_rb_space(struct scif_rb *rb)
{
        rb->current_read_offset = *rb->read_ptr;
        /*
         * Update from the HW read pointer only once the peer has exposed the
         * new empty slot. This barrier is paired with the memory barrier in
         * scif_rb_update_read_ptr()
         */
        mb();
        return scif_rb_ring_space(rb->current_write_offset,
                                  rb->current_read_offset, rb->size);
}

/**
 * scif_rb_write - Write a message to the RB
 * @rb: ring buffer
 * @msg: buffer containing the message. Must be at least size bytes long
 * @size: the size (in bytes) to be copied to the RB
 *
 * This API does not block if there isn't enough space in the RB.
 *
 * Return: 0 on success or -ENOMEM on failure
 */
int scif_rb_write(struct scif_rb *rb, void *msg, u32 size)
{
        void *header;

        if (scif_rb_space(rb) < size)
                return -ENOMEM;
        header = rb->rb_base + rb->current_write_offset;
        memcpy_torb(rb, header, msg, size);
        /*
         * Only the local write offset is updated here; the shared
         * write pointer is not exposed to the peer until
         * scif_rb_commit().
         */
        rb->current_write_offset =
                (rb->current_write_offset + size) & (rb->size - 1);
        return 0;
}

/**
 * scif_rb_commit - Commit the message so the peer can fetch it
 * @rb: ring buffer
 */
void scif_rb_commit(struct scif_rb *rb)
{
        /*
         * We must ensure ordering of all the data committed previously
         * before we expose the new message to the peer by updating the
         * write_ptr. This write barrier is paired with the read barrier
         * in scif_rb_count(..)
         */
        wmb();
        WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
#ifdef CONFIG_INTEL_MIC_CARD
        /*
         * X100 Si bug: For the case where a Core is performing an EXT_WR
         * followed by a Doorbell Write, the Core must perform two EXT_WR to the
         * same address with the same data before it does the Doorbell Write.
         * This way, if ordering is violated for the Interrupt Message, it will
         * fall just behind the first Posted associated with the first EXT_WR.
         */
        WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
#endif
}
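
/*
 * Producer-side sketch (illustrative only; "msg" and "len" are the
 * caller's payload): stage the message locally, then publish it to the
 * peer with a single committed write pointer update.
 *
 *        if (!scif_rb_write(&rb, msg, len))
 *                scif_rb_commit(&rb);    // peer can now observe the message
 */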

/**
 * scif_rb_get - Get the next message from the ring buffer
 * @rb: ring buffer
 * @size: Number of bytes to be read
 *
 * Return: NULL if fewer than size bytes are available to be read,
 *      otherwise a pointer to the start of the next message
 */
static void *scif_rb_get(struct scif_rb *rb, u32 size)
{
        void *header = NULL;

        if (scif_rb_count(rb, size) >= size)
                header = rb->rb_base + rb->current_read_offset;
        return header;
}

/**
 * scif_rb_get_next - Read from the ring buffer.
 * @rb: ring buffer
 * @msg: buffer to hold the message. Must be at least size bytes long
 * @size: Number of bytes to be read
 *
 * Return: number of bytes read if at least size bytes are available,
 * otherwise zero.
 */
u32 scif_rb_get_next(struct scif_rb *rb, void *msg, u32 size)
{
        void *header = NULL;
        int read_size = 0;

        header = scif_rb_get(rb, size);
        if (header) {
                u32 next_cmd_offset =
                        (rb->current_read_offset + size) & (rb->size - 1);

                read_size = size;
                rb->current_read_offset = next_cmd_offset;
                memcpy_fromrb(rb, header, msg, size);
        }
        return read_size;
}

/**
 * scif_rb_update_read_ptr - Expose the consumed slots to the peer
 * @rb: ring buffer
 */
void scif_rb_update_read_ptr(struct scif_rb *rb)
{
        u32 new_offset;

        new_offset = rb->current_read_offset;
        /*
         * We must ensure ordering of all the data committed or read
         * previously before we expose the empty slot to the peer by
         * updating the read_ptr. This barrier is paired with the memory
         * barrier in scif_rb_space(..)
         */
        mb();
        WRITE_ONCE(*rb->read_ptr, new_offset);
#ifdef CONFIG_INTEL_MIC_CARD
        /*
         * X100 Si Bug: For the case where a Core is performing an EXT_WR
         * followed by a Doorbell Write, the Core must perform two EXT_WR to the
         * same address with the same data before it does the Doorbell Write.
         * This way, if ordering is violated for the Interrupt Message, it will
         * fall just behind the first Posted associated with the first EXT_WR.
         */
        WRITE_ONCE(*rb->read_ptr, new_offset);
#endif
}
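
/*
 * Consumer-side sketch (illustrative only; MSG_SIZE and handle_msg()
 * are hypothetical): drain complete messages, then return the consumed
 * slots to the peer in one read pointer update.
 *
 *        u8 buf[MSG_SIZE];
 *
 *        while (scif_rb_get_next(&rb, buf, sizeof(buf)))
 *                handle_msg(buf);
 *        scif_rb_update_read_ptr(&rb);    // expose freed slots to the peer
 */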

/**
 * scif_rb_count - Query bytes available for reading from the RB
 * @rb: ring buffer
 * @size: Number of bytes expected to be read
 *
 * Return: number of bytes that can be read from the RB
 */
u32 scif_rb_count(struct scif_rb *rb, u32 size)
{
        if (scif_rb_ring_cnt(rb->current_write_offset,
                             rb->current_read_offset,
                             rb->size) < size) {
                rb->current_write_offset = *rb->write_ptr;
                /*
                 * Refresh the cached write offset from the HW write pointer
                 * only when the cached count is insufficient, i.e. once the
                 * peer has exposed a new message. This read barrier is paired
                 * with the write barrier in scif_rb_commit(..)
                 */
                smp_rmb();
        }
        return scif_rb_ring_cnt(rb->current_write_offset,
                                rb->current_read_offset,
                                rb->size);
}