linux/drivers/infiniband/hw/amso1100/c2.h
/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __C2_H
#define __C2_H

#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "c2_provider.h"
#include "c2_mq.h"
#include "c2_status.h"

#define DRV_NAME     "c2"
#define DRV_VERSION  "1.1"
#define PFX          DRV_NAME ": "

#define BAR_0                0
#define BAR_2                2
#define BAR_4                4

#define RX_BUF_SIZE         (1536 + 8)
#define ETH_JUMBO_MTU        9000
#define C2_MAGIC            "CEPHEUS"
#define C2_VERSION           4
#define C2_IVN              (18 & 0x7fffffff)

#define C2_REG0_SIZE        (16 * 1024)
#define C2_REG2_SIZE        (2 * 1024 * 1024)
#define C2_REG4_SIZE        (256 * 1024 * 1024)
#define C2_NUM_TX_DESC       341
#define C2_NUM_RX_DESC       256
#define C2_PCI_REGS_OFFSET  (0x10000)
#define C2_RXP_HRXDQ_OFFSET (((C2_REG4_SIZE)/2))
#define C2_RXP_HRXDQ_SIZE   (4096)
#define C2_TXP_HTXDQ_OFFSET (((C2_REG4_SIZE)/2) + C2_RXP_HRXDQ_SIZE)
#define C2_TXP_HTXDQ_SIZE   (4096)
#define C2_TX_TIMEOUT       (6*HZ)

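/*
 * Per the defines above, the host-visible hardware descriptor queues sit in
 * the upper half of the BAR4 window: a 4 KB RXP queue at C2_RXP_HRXDQ_OFFSET
 * (C2_NUM_RX_DESC 16-byte descriptors) followed by a 4 KB TXP queue at
 * C2_TXP_HTXDQ_OFFSET (C2_NUM_TX_DESC 12-byte descriptors).
 */
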
/* CEPHEUS */
static const u8 c2_magic[] = {
        0x43, 0x45, 0x50, 0x48, 0x45, 0x55, 0x53
};

enum adapter_pci_regs {
        C2_REGS_MAGIC = 0x0000,
        C2_REGS_VERS = 0x0008,
        C2_REGS_IVN = 0x000C,
        C2_REGS_PCI_WINSIZE = 0x0010,
        C2_REGS_Q0_QSIZE = 0x0014,
        C2_REGS_Q0_MSGSIZE = 0x0018,
        C2_REGS_Q0_POOLSTART = 0x001C,
        C2_REGS_Q0_SHARED = 0x0020,
        C2_REGS_Q1_QSIZE = 0x0024,
        C2_REGS_Q1_MSGSIZE = 0x0028,
        C2_REGS_Q1_SHARED = 0x0030,
        C2_REGS_Q2_QSIZE = 0x0034,
        C2_REGS_Q2_MSGSIZE = 0x0038,
        C2_REGS_Q2_SHARED = 0x0040,
        C2_REGS_ENADDR = 0x004C,
        C2_REGS_RDMA_ENADDR = 0x0054,
        C2_REGS_HRX_CUR = 0x006C,
};

struct c2_adapter_pci_regs {
        char reg_magic[8];
        u32 version;
        u32 ivn;
        u32 pci_window_size;
        u32 q0_q_size;
        u32 q0_msg_size;
        u32 q0_pool_start;
        u32 q0_shared;
        u32 q1_q_size;
        u32 q1_msg_size;
        u32 q1_pool_start;
        u32 q1_shared;
        u32 q2_q_size;
        u32 q2_msg_size;
        u32 q2_pool_start;
        u32 q2_shared;
        u32 log_start;
        u32 log_size;
        u8 host_enaddr[8];
        u8 rdma_enaddr[8];
        u32 crash_entry;
        u32 crash_ready[2];
        u32 fw_txd_cur;
        u32 fw_hrxd_cur;
        u32 fw_rxd_cur;
};

enum pci_regs {
        C2_HISR = 0x0000,
        C2_DISR = 0x0004,
        C2_HIMR = 0x0008,
        C2_DIMR = 0x000C,
        C2_NISR0 = 0x0010,
        C2_NISR1 = 0x0014,
        C2_NIMR0 = 0x0018,
        C2_NIMR1 = 0x001C,
        C2_IDIS = 0x0020,
};

enum {
        C2_PCI_HRX_INT = 1 << 8,
        C2_PCI_HTX_INT = 1 << 17,
        C2_PCI_HRX_QUI = 1 << 31,
};

/*
 * Cepheus registers in BAR0.
 */
struct c2_pci_regs {
        u32 hostisr;
        u32 dmaisr;
        u32 hostimr;
        u32 dmaimr;
        u32 netisr0;
        u32 netisr1;
        u32 netimr0;
        u32 netimr1;
        u32 int_disable;
};

/* TXP flags */
enum c2_txp_flags {
        TXP_HTXD_DONE = 0,
        TXP_HTXD_READY = 1 << 0,
        TXP_HTXD_UNINIT = 1 << 1,
};

/* RXP flags */
enum c2_rxp_flags {
        RXP_HRXD_UNINIT = 0,
        RXP_HRXD_READY = 1 << 0,
        RXP_HRXD_DONE = 1 << 1,
};

/* RXP status */
enum c2_rxp_status {
        RXP_HRXD_ZERO = 0,
        RXP_HRXD_OK = 1 << 0,
        RXP_HRXD_BUF_OV = 1 << 1,
};

/* TXP descriptor fields */
enum txp_desc {
        C2_TXP_FLAGS = 0x0000,
        C2_TXP_LEN = 0x0002,
        C2_TXP_ADDR = 0x0004,
};

/* RXP descriptor fields */
enum rxp_desc {
        C2_RXP_FLAGS = 0x0000,
        C2_RXP_STATUS = 0x0002,
        C2_RXP_COUNT = 0x0004,
        C2_RXP_LEN = 0x0006,
        C2_RXP_ADDR = 0x0008,
};

struct c2_txp_desc {
        u16 flags;
        u16 len;
        u64 addr;
} __attribute__ ((packed));

struct c2_rxp_desc {
        u16 flags;
        u16 status;
        u16 count;
        u16 len;
        u64 addr;
} __attribute__ ((packed));
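
/*
 * Note: the txp_desc/rxp_desc enums above are the byte offsets of the
 * corresponding fields within these packed descriptors, e.g. C2_TXP_LEN is
 * offsetof(struct c2_txp_desc, len) and C2_RXP_ADDR is
 * offsetof(struct c2_rxp_desc, addr), presumably so individual fields can be
 * poked directly in the mapped descriptor windows.
 */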

struct c2_rxp_hdr {
        u16 flags;
        u16 status;
        u16 len;
        u16 rsvd;
} __attribute__ ((packed));

struct c2_tx_desc {
        u32 len;
        u32 status;
        dma_addr_t next_offset;
};

struct c2_rx_desc {
        u32 len;
        u32 status;
        dma_addr_t next_offset;
};

struct c2_alloc {
        u32 last;
        u32 max;
        spinlock_t lock;
        unsigned long *table;
};

struct c2_array {
        struct {
                void **page;
                int used;
        } *page_list;
};

/*
 * The MQ shared pointer pool is organized as a linked list of
 * chunks. Each chunk contains a linked list of free shared pointers
 * that can be allocated to a given user mode client.
 */
struct sp_chunk {
        struct sp_chunk *next;
        dma_addr_t dma_addr;
        DEFINE_DMA_UNMAP_ADDR(mapping);
        u16 head;
        u16 shared_ptr[0];
};
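
/*
 * Illustrative sketch (not part of the driver): a kernel client would
 * typically grab a shared pointer from the pool built by c2_init_mqsp_pool()
 * (declared at the bottom of this file) and hand its DMA address to the
 * adapter:
 *
 *	__be16 *mqsp;
 *	dma_addr_t dma_addr;
 *
 *	mqsp = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, &dma_addr,
 *			     GFP_KERNEL);
 *	if (!mqsp)
 *		return -ENOMEM;
 *	...
 *	c2_free_mqsp(mqsp);
 */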

struct c2_pd_table {
        u32 last;
        u32 max;
        spinlock_t lock;
        unsigned long *table;
};

struct c2_qp_table {
        struct idr idr;
        spinlock_t lock;
};

struct c2_element {
        struct c2_element *next;
        void *ht_desc;          /* host     descriptor */
        void __iomem *hw_desc;  /* hardware descriptor */
        struct sk_buff *skb;
        dma_addr_t mapaddr;
        u32 maplen;
};

struct c2_ring {
        struct c2_element *to_clean;    /* next element to reclaim/complete */
        struct c2_element *to_use;      /* next free element to post */
        struct c2_element *start;       /* base of the element array */
        unsigned long count;            /* number of elements in the ring */
};

struct c2_dev {
        struct ib_device ibdev;
        void __iomem *regs;
        void __iomem *mmio_txp_ring; /* remapped adapter memory for hw rings */
        void __iomem *mmio_rxp_ring;
        spinlock_t lock;
        struct pci_dev *pcidev;
        struct net_device *netdev;
        struct net_device *pseudo_netdev;
        unsigned int cur_tx;
        unsigned int cur_rx;
        u32 adapter_handle;
        int device_cap_flags;
        void __iomem *kva;      /* KVA device memory */
        unsigned long pa;       /* PA device memory */
        void **qptr_array;

        struct kmem_cache *host_msg_cache;

        struct list_head cca_link;              /* adapter list */
        struct list_head eh_wakeup_list;        /* event wakeup list */
        wait_queue_head_t req_vq_wo;

        /* Cached RNIC properties */
        struct ib_device_attr props;

        struct c2_pd_table pd_table;
        struct c2_qp_table qp_table;
        int ports;              /* num of GigE ports */
        int devnum;
        spinlock_t vqlock;      /* sync vbs req MQ */

        /* Verbs Queues */
        struct c2_mq req_vq;    /* Verbs Request MQ */
        struct c2_mq rep_vq;    /* Verbs Reply MQ */
        struct c2_mq aeq;       /* Async Events MQ */

        /* Kernel client MQs */
        struct sp_chunk *kern_mqsp_pool;

        /* Device updates these values when posting messages to a host
         * target queue */
        u16 req_vq_shared;
        u16 rep_vq_shared;
        u16 aeq_shared;
        u16 irq_claimed;

        /*
         * Shared host target pages for user-accessible MQs.
         */
        int hthead;             /* index of first free entry */
        void *htpages;          /* kernel vaddr */
        int htlen;              /* length of htpages memory */
        void *htuva;            /* user mapped vaddr */
        spinlock_t htlock;      /* serialize allocation */

        u64 adapter_hint_uva;   /* access to the activity FIFO */

        //      spinlock_t aeq_lock;
        //      spinlock_t rnic_lock;

        __be16 *hint_count;
        dma_addr_t hint_count_dma;
        u16 hints_read;

        int init;               /* TRUE if it's ready */
        char ae_cache_name[16];
        char vq_cache_name[16];
};

struct c2_port {
        u32 msg_enable;
        struct c2_dev *c2dev;
        struct net_device *netdev;

        spinlock_t tx_lock;
        u32 tx_avail;
        struct c2_ring tx_ring;
        struct c2_ring rx_ring;

        void *mem;              /* PCI memory for host rings */
        dma_addr_t dma;
        unsigned long mem_size;

        u32 rx_buf_size;
};

/*
 * Activity FIFO registers in BAR0.
 */
#define PCI_BAR0_HOST_HINT      0x100
#define PCI_BAR0_ADAPTER_HINT   0x2000

/*
 * Completion queue (CQ) arming flags.
 */
#define CQ_ARMED        0x01
#define CQ_WAIT_FOR_DMA 0x80

/*
 * The format of a hint is as follows:
 * Lower 16 bits are the count of hints for the queue.
 * Next 15 bits are the qp_index.
 * Uppermost bit depends on who reads it:
 *    If read by producer, then it means Full (1) or Not-Full (0)
 *    If read by consumer, then it means Empty (1) or Not-Empty (0)
 */
#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | (hint_count))
#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
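
/*
 * Illustrative sketch (not part of the driver): packing and unpacking a
 * hint word with the macros above, e.g. for queue index 5 holding 3 hints:
 *
 *	u32 hint = C2_HINT_MAKE(5, 3);
 *	u16 q_index = C2_HINT_GET_INDEX(hint);	(yields 5)
 *	u16 count = C2_HINT_GET_COUNT(hint);	(yields 3)
 */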

/*
 * The following defines the offset in SDRAM for the
 * struct c2_adapter_pci_regs layout.
 */
#define C2_ADAPTER_PCI_REGS_OFFSET 0x10000

#ifndef readq
/*
 * Fallback for architectures that do not provide readq(): assemble the
 * 64-bit value from two 32-bit MMIO reads (low word at addr, high word
 * at addr + 4).
 */
static inline u64 readq(const void __iomem * addr)
{
        u64 ret = readl(addr + 4);
        ret <<= 32;
        ret |= readl(addr);

        return ret;
}
#endif

#ifndef writeq
/*
 * Fallback for architectures that do not provide writeq(): emit the 64-bit
 * value as two raw 32-bit MMIO writes (low word at addr, high word at
 * addr + 4).
 */
static inline void __raw_writeq(u64 val, void __iomem * addr)
{
        __raw_writel((u32) (val), addr);
        __raw_writel((u32) (val >> 32), (addr + 4));
}
#endif

/*
 * The current RX index is kept, big-endian, in the last word of the 4 KB
 * TXP descriptor window (offset 4092 = C2_NUM_TX_DESC * 12 bytes, i.e.
 * just past the TX descriptors).
 */
#define C2_SET_CUR_RX(c2dev, cur_rx) \
        __raw_writel((__force u32) cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)

#define C2_GET_CUR_RX(c2dev) \
        be32_to_cpu((__force __be32) readl(c2dev->mmio_txp_ring + 4092))

static inline struct c2_dev *to_c2dev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct c2_dev, ibdev);
}

/*
 * Map an adapter (CCERR_*) status code from a verbs reply to a Linux errno.
 */
static inline int c2_errno(void *reply)
{
        switch (c2_wr_get_result(reply)) {
        case C2_OK:
                return 0;
        case CCERR_NO_BUFS:
        case CCERR_INSUFFICIENT_RESOURCES:
        case CCERR_ZERO_RDMA_READ_RESOURCES:
                return -ENOMEM;
        case CCERR_MR_IN_USE:
        case CCERR_QP_IN_USE:
                return -EBUSY;
        case CCERR_ADDR_IN_USE:
                return -EADDRINUSE;
        case CCERR_ADDR_NOT_AVAIL:
                return -EADDRNOTAVAIL;
        case CCERR_CONN_RESET:
                return -ECONNRESET;
        case CCERR_NOT_IMPLEMENTED:
        case CCERR_INVALID_WQE:
                return -ENOSYS;
        case CCERR_QP_NOT_PRIVILEGED:
                return -EPERM;
        case CCERR_STACK_ERROR:
                return -EPROTO;
        case CCERR_ACCESS_VIOLATION:
        case CCERR_BASE_AND_BOUNDS_VIOLATION:
                return -EFAULT;
        case CCERR_STAG_STATE_NOT_INVALID:
        case CCERR_INVALID_ADDRESS:
        case CCERR_INVALID_CQ:
        case CCERR_INVALID_EP:
        case CCERR_INVALID_MODIFIER:
        case CCERR_INVALID_MTU:
        case CCERR_INVALID_PD_ID:
        case CCERR_INVALID_QP:
        case CCERR_INVALID_RNIC:
        case CCERR_INVALID_STAG:
                return -EINVAL;
        default:
                return -EAGAIN;
        }
}
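
/*
 * Illustrative sketch (not part of the driver): a caller that has received
 * a verbs reply message ("reply" below is hypothetical) would typically
 * convert the adapter status into an errno like this:
 *
 *	err = c2_errno(reply);
 *	if (err)
 *		goto bail;
 */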

/* Device */
extern int c2_register_device(struct c2_dev *c2dev);
extern void c2_unregister_device(struct c2_dev *c2dev);
extern int c2_rnic_init(struct c2_dev *c2dev);
extern void c2_rnic_term(struct c2_dev *c2dev);
extern void c2_rnic_interrupt(struct c2_dev *c2dev);
extern int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
extern int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);

/* QPs */
extern int c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd,
                       struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp);
extern void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp);
extern struct ib_qp *c2_get_qp(struct ib_device *device, int qpn);
extern int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
                        struct ib_qp_attr *attr, int attr_mask);
extern int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
                                 int ord, int ird);
extern int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
                        struct ib_send_wr **bad_wr);
extern int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
                           struct ib_recv_wr **bad_wr);
extern void c2_init_qp_table(struct c2_dev *c2dev);
extern void c2_cleanup_qp_table(struct c2_dev *c2dev);
extern void c2_set_qp_state(struct c2_qp *, int);
extern struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn);

/* PDs */
extern int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd);
extern void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd);
extern int c2_init_pd_table(struct c2_dev *c2dev);
extern void c2_cleanup_pd_table(struct c2_dev *c2dev);

/* CQs */
extern int c2_init_cq(struct c2_dev *c2dev, int entries,
                      struct c2_ucontext *ctx, struct c2_cq *cq);
extern void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq);
extern void c2_cq_event(struct c2_dev *c2dev, u32 mq_index);
extern void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
extern int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);

/* CM */
extern int c2_llp_connect(struct iw_cm_id *cm_id,
                          struct iw_cm_conn_param *iw_param);
extern int c2_llp_accept(struct iw_cm_id *cm_id,
                         struct iw_cm_conn_param *iw_param);
extern int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata,
                         u8 pdata_len);
extern int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog);
extern int c2_llp_service_destroy(struct iw_cm_id *cm_id);

/* MM */
extern int c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
                                      int page_size, int pbl_depth, u32 length,
                                      u32 off, u64 *va, enum c2_acf acf,
                                      struct c2_mr *mr);
extern int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index);

/* AE */
extern void c2_ae_event(struct c2_dev *c2dev, u32 mq_index);

/* MQSP Allocator */
extern int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
                             struct sp_chunk **root);
extern void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root);
extern __be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
                             dma_addr_t *dma_addr, gfp_t gfp_mask);
extern void c2_free_mqsp(__be16 *mqsp);
#endif