/* linux/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c */
   1/***********************license start***************
   2 * Author: Cavium Networks
   3 *
   4 * Contact: support@caviumnetworks.com
   5 * This file is part of the OCTEON SDK
   6 *
   7 * Copyright (c) 2003-2008 Cavium Networks
   8 *
   9 * This file is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License, Version 2, as
  11 * published by the Free Software Foundation.
  12 *
  13 * This file is distributed in the hope that it will be useful, but
  14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
  15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  16 * NONINFRINGEMENT.  See the GNU General Public License for more
  17 * details.
  18 *
  19 * You should have received a copy of the GNU General Public License
  20 * along with this file; if not, write to the Free Software
  21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  22 * or visit http://www.gnu.org/licenses/.
  23 *
  24 * This file may also be available under a different license from Cavium.
  25 * Contact Cavium Networks for more information
  26 ***********************license end**************************************/
  27
  28/*
  29 * Support functions for managing command queues used for
  30 * various hardware blocks.
  31 */
  32
  33#include <linux/kernel.h>
  34
  35#include <asm/octeon/octeon.h>
  36
  37#include <asm/octeon/cvmx-config.h>
  38#include <asm/octeon/cvmx-fpa.h>
  39#include <asm/octeon/cvmx-cmd-queue.h>
  40
  41#include <asm/octeon/cvmx-npei-defs.h>
  42#include <asm/octeon/cvmx-pexp-defs.h>
  43#include <asm/octeon/cvmx-pko-defs.h>
  44
/**
 * Pointer to the global command-queue state, shared by all users of
 * this file. It points to a bootmem named block ("cvmx_cmd_queues")
 * and is populated lazily by __cvmx_cmd_queue_init_state_ptr().
 */
__cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
EXPORT_SYMBOL_GPL(__cvmx_cmd_queue_state_ptr);
  51
/**
 * Initialize the global queue state pointer.
 *
 * Idempotent: returns immediately if the pointer is already set.
 * Otherwise the state is allocated as a bootmem named block so that
 * every core (and any other image sharing bootmem) resolves to the
 * same physical memory.
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
{
	char *alloc_name = "cvmx_cmd_queues";
#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
	extern uint64_t octeon_reserve32_memory;
#endif

	/* Fast path: someone already set up the shared state. */
	if (likely(__cvmx_cmd_queue_state_ptr))
		return CVMX_CMD_QUEUE_SUCCESS;

#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
	/*
	 * Prefer allocating inside the reserved 32-bit addressable
	 * region when one exists; 128-byte alignment in either case.
	 */
	if (octeon_reserve32_memory)
		__cvmx_cmd_queue_state_ptr =
		    cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
						   octeon_reserve32_memory,
						   octeon_reserve32_memory +
						   (CONFIG_CAVIUM_RESERVE32 <<
						    20) - 1, 128, alloc_name);
	else
#endif
		__cvmx_cmd_queue_state_ptr =
		    cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
					    128,
					    alloc_name);
	if (__cvmx_cmd_queue_state_ptr)
		memset(__cvmx_cmd_queue_state_ptr, 0,
		       sizeof(*__cvmx_cmd_queue_state_ptr));
	else {
		/*
		 * Allocation failed: the named block may already have
		 * been created (e.g. by another image), so try to look
		 * it up and reuse it before giving up.
		 */
		struct cvmx_bootmem_named_block_desc *block_desc =
		    cvmx_bootmem_find_named_block(alloc_name);
		if (block_desc)
			__cvmx_cmd_queue_state_ptr =
			    cvmx_phys_to_ptr(block_desc->base_addr);
		else {
			cvmx_dprintf
			    ("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n",
			     alloc_name);
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
	}
	return CVMX_CMD_QUEUE_SUCCESS;
}
  99
/**
 * Initialize a command queue for use. The initial FPA buffer is
 * allocated and the hardware unit is configured to point to the
 * new command queue.
 *
 * @queue_id:  Hardware command queue to initialize.
 * @max_depth: Maximum outstanding commands that can be queued;
 *             artificially limited to 1<<20 words, and must be 0
 *             when depth checking is compiled out.
 * @fpa_pool:  FPA pool (0-7) the command queues should come from.
 * @pool_size: Size of each buffer in the FPA pool (bytes), 128-65536.
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS, CVMX_CMD_QUEUE_ALREADY_SETUP if
 * the queue was previously initialized with identical parameters,
 * or a failure code.
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
                                                  int max_depth, int fpa_pool,
                                                  int pool_size)
{
	__cvmx_cmd_queue_state_t *qstate;
	cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();
	if (result != CVMX_CMD_QUEUE_SUCCESS)
		return result;

	qstate = __cvmx_cmd_queue_get_state(queue_id);
	if (qstate == NULL)
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	/*
	 * We artificially limit max_depth to 1<<20 words. It is an
	 * arbitrary limit.
	 */
	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
		if ((max_depth < 0) || (max_depth > 1 << 20))
			return CVMX_CMD_QUEUE_INVALID_PARAM;
	} else if (max_depth != 0)
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	/* Range-check the pool number and buffer size before touching hw. */
	if ((fpa_pool < 0) || (fpa_pool > 7))
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	if ((pool_size < 128) || (pool_size > 65536))
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	/*
	 * See if someone else has already initialized the queue. If
	 * so, success only when the parameters match exactly (the
	 * state struct stores pool_size as (size>>3)-1).
	 */
	if (qstate->base_ptr_div128) {
		if (max_depth != (int)qstate->max_depth) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				"Queue already initialized with different "
				"max_depth (%d).\n",
			     (int)qstate->max_depth);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		if (fpa_pool != qstate->fpa_pool) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				"Queue already initialized with different "
				"FPA pool (%u).\n",
			     qstate->fpa_pool);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				"Queue already initialized with different "
				"FPA pool size (%u).\n",
			     (qstate->pool_size_m1 + 1) << 3);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		/* Make our reads/writes visible before reporting setup. */
		CVMX_SYNCWS;
		return CVMX_CMD_QUEUE_ALREADY_SETUP;
	} else {
		union cvmx_fpa_ctl_status status;
		void *buffer;

		/* The FPA must be running before we can pull buffers. */
		status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
		if (!status.s.enb) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "FPA is not enabled.\n");
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
		buffer = cvmx_fpa_alloc(fpa_pool);
		if (buffer == NULL) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Unable to allocate initial buffer.\n");
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}

		memset(qstate, 0, sizeof(*qstate));
		qstate->max_depth = max_depth;
		qstate->fpa_pool = fpa_pool;
		qstate->pool_size_m1 = (pool_size >> 3) - 1;
		/* base_ptr_div128 doubles as the "initialized" flag above. */
		qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
		/*
		 * We zeroed the now serving field so we need to also
		 * zero the ticket.
		 */
		__cvmx_cmd_queue_state_ptr->
		    ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
		/* Flush the state to memory before other cores see it. */
		CVMX_SYNCWS;
		return CVMX_CMD_QUEUE_SUCCESS;
	}
}
 197
 198/**
 199 * Shutdown a queue a free it's command buffers to the FPA. The
 200 * hardware connected to the queue must be stopped before this
 201 * function is called.
 202 *
 203 * @queue_id: Queue to shutdown
 204 *
 205 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 206 */
 207cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
 208{
 209        __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
 210        if (qptr == NULL) {
 211                cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to "
 212                             "get queue information.\n");
 213                return CVMX_CMD_QUEUE_INVALID_PARAM;
 214        }
 215
 216        if (cvmx_cmd_queue_length(queue_id) > 0) {
 217                cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still "
 218                             "has data in it.\n");
 219                return CVMX_CMD_QUEUE_FULL;
 220        }
 221
 222        __cvmx_cmd_queue_lock(queue_id, qptr);
 223        if (qptr->base_ptr_div128) {
 224                cvmx_fpa_free(cvmx_phys_to_ptr
 225                              ((uint64_t) qptr->base_ptr_div128 << 7),
 226                              qptr->fpa_pool, 0);
 227                qptr->base_ptr_div128 = 0;
 228        }
 229        __cvmx_cmd_queue_unlock(qptr);
 230
 231        return CVMX_CMD_QUEUE_SUCCESS;
 232}
 233
/**
 * Return the number of command words pending in the queue. This
 * function may be relatively slow for some hardware units.
 *
 * @queue_id: Hardware command queue to query
 *
 * Returns Number of outstanding commands, or
 * CVMX_CMD_QUEUE_INVALID_PARAM (negative) for an unknown queue.
 */
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
{
	if (CVMX_ENABLE_PARAMETER_CHECKING) {
		if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
			return CVMX_CMD_QUEUE_INVALID_PARAM;
	}

	/*
	 * The cast is here so gcc will check that all values in the
	 * cvmx_cmd_queue_id_t enumeration are here. The high bits of
	 * the id select the hardware unit, the low bits the queue.
	 */
	switch ((cvmx_cmd_queue_id_t) (queue_id & 0xff0000)) {
	case CVMX_CMD_QUEUE_PKO_BASE:
		/*
		 * FIXME: Need atomic lock on
		 * CVMX_PKO_REG_READ_IDX. Right now we are normally
		 * called with the queue lock, so that is a SLIGHT
		 * amount of protection.
		 */
		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
			/* CN3XXX reports the doorbell count in DEBUG9. */
			union cvmx_pko_mem_debug9 debug9;
			debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
			return debug9.cn38xx.doorbell;
		} else {
			/* Later models report it in DEBUG8. */
			union cvmx_pko_mem_debug8 debug8;
			debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
			return debug8.cn50xx.doorbell;
		}
	case CVMX_CMD_QUEUE_ZIP:
	case CVMX_CMD_QUEUE_DFA:
	case CVMX_CMD_QUEUE_RAID:
		/* FIXME: Implement other lengths */
		return 0;
	case CVMX_CMD_QUEUE_DMA_BASE:
		{
			union cvmx_npei_dmax_counts dmax_counts;
			dmax_counts.u64 =
			    cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS
					  (queue_id & 0x7));
			return dmax_counts.s.dbell;
		}
	case CVMX_CMD_QUEUE_END:
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	}
	return CVMX_CMD_QUEUE_INVALID_PARAM;
}
 289
 290/**
 291 * Return the command buffer to be written to. The purpose of this
 292 * function is to allow CVMX routine access t othe low level buffer
 293 * for initial hardware setup. User applications should not call this
 294 * function directly.
 295 *
 296 * @queue_id: Command queue to query
 297 *
 298 * Returns Command buffer or NULL on failure
 299 */
 300void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
 301{
 302        __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
 303        if (qptr && qptr->base_ptr_div128)
 304                return cvmx_phys_to_ptr((uint64_t) qptr->base_ptr_div128 << 7);
 305        else
 306                return NULL;
 307}
 308