linux/drivers/infiniband/hw/amso1100/c2.h
/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __C2_H
#define __C2_H

#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "c2_provider.h"
#include "c2_mq.h"
#include "c2_status.h"

#define DRV_NAME     "c2"
#define DRV_VERSION  "1.1"
#define PFX          DRV_NAME ": "

#define BAR_0                0
#define BAR_2                2
#define BAR_4                4

#define RX_BUF_SIZE         (1536 + 8)
#define ETH_JUMBO_MTU        9000
#define C2_MAGIC            "CEPHEUS"
#define C2_VERSION           4
#define C2_IVN              (18 & 0x7fffffff)

#define C2_REG0_SIZE        (16 * 1024)
#define C2_REG2_SIZE        (2 * 1024 * 1024)
#define C2_REG4_SIZE        (256 * 1024 * 1024)
#define C2_NUM_TX_DESC       341
#define C2_NUM_RX_DESC       256
#define C2_PCI_REGS_OFFSET  (0x10000)
#define C2_RXP_HRXDQ_OFFSET (((C2_REG4_SIZE)/2))
#define C2_RXP_HRXDQ_SIZE   (4096)
#define C2_TXP_HTXDQ_OFFSET (((C2_REG4_SIZE)/2) + C2_RXP_HRXDQ_SIZE)
#define C2_TXP_HTXDQ_SIZE   (4096)
#define C2_TX_TIMEOUT       (6*HZ)

/* "CEPHEUS" in ASCII */
static const u8 c2_magic[] = {
        0x43, 0x45, 0x50, 0x48, 0x45, 0x55, 0x53
};
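
/*
 * Illustrative sketch (not driver code): probe logic could validate the
 * adapter by comparing these bytes against the magic register block,
 * reading byte-wise through an ioremapped BAR0 cursor (mmio_regs is a
 * hypothetical name):
 *
 *      int i;
 *      for (i = 0; i < sizeof(c2_magic); i++)
 *              if (readb(mmio_regs + C2_REGS_MAGIC + i) != c2_magic[i])
 *                      return -ENODEV;
 */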

enum adapter_pci_regs {
        C2_REGS_MAGIC = 0x0000,
        C2_REGS_VERS = 0x0008,
        C2_REGS_IVN = 0x000C,
        C2_REGS_PCI_WINSIZE = 0x0010,
        C2_REGS_Q0_QSIZE = 0x0014,
        C2_REGS_Q0_MSGSIZE = 0x0018,
        C2_REGS_Q0_POOLSTART = 0x001C,
        C2_REGS_Q0_SHARED = 0x0020,
        C2_REGS_Q1_QSIZE = 0x0024,
        C2_REGS_Q1_MSGSIZE = 0x0028,
        C2_REGS_Q1_SHARED = 0x0030,
        C2_REGS_Q2_QSIZE = 0x0034,
        C2_REGS_Q2_MSGSIZE = 0x0038,
        C2_REGS_Q2_SHARED = 0x0040,
        C2_REGS_ENADDR = 0x004C,
        C2_REGS_RDMA_ENADDR = 0x0054,
        C2_REGS_HRX_CUR = 0x006C,
};
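
/*
 * These enumerators are byte offsets into the adapter's PCI register
 * block, which the adapter stores big-endian. A hedged read sketch
 * (mmio_regs is a hypothetical ioremapped cursor into that window):
 *
 *      u32 vers = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS));
 *      if (vers != C2_VERSION)
 *              return -ENODEV;
 */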

struct c2_adapter_pci_regs {
        char reg_magic[8];
        u32 version;
        u32 ivn;
        u32 pci_window_size;
        u32 q0_q_size;
        u32 q0_msg_size;
        u32 q0_pool_start;
        u32 q0_shared;
        u32 q1_q_size;
        u32 q1_msg_size;
        u32 q1_pool_start;
        u32 q1_shared;
        u32 q2_q_size;
        u32 q2_msg_size;
        u32 q2_pool_start;
        u32 q2_shared;
        u32 log_start;
        u32 log_size;
        u8 host_enaddr[8];
        u8 rdma_enaddr[8];
        u32 crash_entry;
        u32 crash_ready[2];
        u32 fw_txd_cur;
        u32 fw_hrxd_cur;
        u32 fw_rxd_cur;
};

enum pci_regs {
        C2_HISR = 0x0000,
        C2_DISR = 0x0004,
        C2_HIMR = 0x0008,
        C2_DIMR = 0x000C,
        C2_NISR0 = 0x0010,
        C2_NISR1 = 0x0014,
        C2_NIMR0 = 0x0018,
        C2_NIMR1 = 0x001C,
        C2_IDIS = 0x0020,
};

enum {
        C2_PCI_HRX_INT = 1 << 8,
        C2_PCI_HTX_INT = 1 << 17,
        C2_PCI_HRX_QUI = 1 << 31,
};
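
/*
 * Sketch (assumption, not driver code): an interrupt handler would test
 * these bits against the host interrupt status register, e.g.
 *
 *      u32 hisr = readl(c2dev->regs + C2_HISR);
 *      if (hisr & C2_PCI_HRX_INT)
 *              ... service the host RX descriptor queue ...
 */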

/*
 * Cepheus registers in BAR0.
 */
struct c2_pci_regs {
        u32 hostisr;
        u32 dmaisr;
        u32 hostimr;
        u32 dmaimr;
        u32 netisr0;
        u32 netisr1;
        u32 netimr0;
        u32 netimr1;
        u32 int_disable;
};

/* TXP flags */
enum c2_txp_flags {
        TXP_HTXD_DONE = 0,
        TXP_HTXD_READY = 1 << 0,
        TXP_HTXD_UNINIT = 1 << 1,
};

/* RXP flags */
enum c2_rxp_flags {
        RXP_HRXD_UNINIT = 0,
        RXP_HRXD_READY = 1 << 0,
        RXP_HRXD_DONE = 1 << 1,
};

/* RXP status */
enum c2_rxp_status {
        RXP_HRXD_ZERO = 0,
        RXP_HRXD_OK = 1 << 0,
        RXP_HRXD_BUF_OV = 1 << 1,
};

/* TXP descriptor fields */
enum txp_desc {
        C2_TXP_FLAGS = 0x0000,
        C2_TXP_LEN = 0x0002,
        C2_TXP_ADDR = 0x0004,
};

/* RXP descriptor fields */
enum rxp_desc {
        C2_RXP_FLAGS = 0x0000,
        C2_RXP_STATUS = 0x0002,
        C2_RXP_COUNT = 0x0004,
        C2_RXP_LEN = 0x0006,
        C2_RXP_ADDR = 0x0008,
};

struct c2_txp_desc {
        u16 flags;
        u16 len;
        u64 addr;
} __attribute__ ((packed));

struct c2_rxp_desc {
        u16 flags;
        u16 status;
        u16 count;
        u16 len;
        u64 addr;
} __attribute__ ((packed));
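
/*
 * The C2_TXP_xxx/C2_RXP_xxx enumerators above are the byte offsets of the
 * fields in these packed descriptors, which is why the structs must stay
 * packed. Illustrative compile-time checks (not present in the driver):
 *
 *      BUILD_BUG_ON(C2_TXP_LEN  != offsetof(struct c2_txp_desc, len));
 *      BUILD_BUG_ON(C2_RXP_ADDR != offsetof(struct c2_rxp_desc, addr));
 */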

struct c2_rxp_hdr {
        u16 flags;
        u16 status;
        u16 len;
        u16 rsvd;
} __attribute__ ((packed));

struct c2_tx_desc {
        u32 len;
        u32 status;
        dma_addr_t next_offset;
};

struct c2_rx_desc {
        u32 len;
        u32 status;
        dma_addr_t next_offset;
};

struct c2_alloc {
        u32 last;
        u32 max;
        spinlock_t lock;
        unsigned long *table;
};

struct c2_array {
        struct {
                void **page;
                int used;
        } *page_list;
};

/*
 * The MQ shared pointer pool is organized as a linked list of
 * chunks. Each chunk contains a linked list of free shared pointers
 * that can be allocated to a given user mode client.
 */
struct sp_chunk {
        struct sp_chunk *next;
        dma_addr_t dma_addr;
        DECLARE_PCI_UNMAP_ADDR(mapping);
        u16 head;
        u16 shared_ptr[0];
};
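
/*
 * Usage sketch (hedged; the prototypes are at the bottom of this header):
 * a kernel client allocates one shared pointer from the pool, hands its
 * DMA address to the adapter, and frees it when the MQ is torn down:
 *
 *      __be16 *mqsp;
 *      dma_addr_t dma;
 *
 *      mqsp = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, &dma, GFP_KERNEL);
 *      if (!mqsp)
 *              return -ENOMEM;
 *      ...
 *      c2_free_mqsp(mqsp);
 */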

struct c2_pd_table {
        u32 last;
        u32 max;
        spinlock_t lock;
        unsigned long *table;
};

struct c2_qp_table {
        struct idr idr;
        spinlock_t lock;
        int last;
};

struct c2_element {
        struct c2_element *next;
        void *ht_desc;          /* host     descriptor */
        void __iomem *hw_desc;  /* hardware descriptor */
        struct sk_buff *skb;
        dma_addr_t mapaddr;
        u32 maplen;
};

struct c2_ring {
        struct c2_element *to_clean;
        struct c2_element *to_use;
        struct c2_element *start;
        unsigned long count;
};
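
/*
 * Descriptive note (inferred from the layout above): 'start' anchors a
 * circular list of 'count' elements; 'to_use' is the next element the
 * driver will post to hardware and 'to_clean' is the oldest element still
 * owned by hardware, so posting advances to_use while reclamation chases
 * it from to_clean.
 */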

struct c2_dev {
        struct ib_device ibdev;
        void __iomem *regs;
        void __iomem *mmio_txp_ring; /* remapped adapter memory for hw rings */
        void __iomem *mmio_rxp_ring;
        spinlock_t lock;
        struct pci_dev *pcidev;
        struct net_device *netdev;
        struct net_device *pseudo_netdev;
        unsigned int cur_tx;
        unsigned int cur_rx;
        u32 adapter_handle;
        int device_cap_flags;
        void __iomem *kva;      /* KVA device memory */
        unsigned long pa;       /* PA device memory */
        void **qptr_array;

        struct kmem_cache *host_msg_cache;

        struct list_head cca_link;              /* adapter list */
        struct list_head eh_wakeup_list;        /* event wakeup list */
        wait_queue_head_t req_vq_wo;

        /* Cached RNIC properties */
        struct ib_device_attr props;

        struct c2_pd_table pd_table;
        struct c2_qp_table qp_table;
        int ports;              /* num of GigE ports */
        int devnum;
        spinlock_t vqlock;      /* serializes the verbs request MQ */

        /* Verbs Queues */
        struct c2_mq req_vq;    /* Verbs Request MQ */
        struct c2_mq rep_vq;    /* Verbs Reply MQ */
        struct c2_mq aeq;       /* Async Events MQ */

        /* Kernel client MQs */
        struct sp_chunk *kern_mqsp_pool;

        /* Device updates these values when posting messages to a host
         * target queue */
        u16 req_vq_shared;
        u16 rep_vq_shared;
        u16 aeq_shared;
        u16 irq_claimed;

        /*
         * Shared host target pages for user-accessible MQs.
         */
        int hthead;             /* index of first free entry */
        void *htpages;          /* kernel vaddr */
        int htlen;              /* length of htpages memory */
        void *htuva;            /* user mapped vaddr */
        spinlock_t htlock;      /* serialize allocation */

        u64 adapter_hint_uva;   /* access to the activity FIFO */

        /* spinlock_t aeq_lock; */
        /* spinlock_t rnic_lock; */

        __be16 *hint_count;
        dma_addr_t hint_count_dma;
        u16 hints_read;

        int init;               /* non-zero once the adapter is ready */
        char ae_cache_name[16];
        char vq_cache_name[16];
};

struct c2_port {
        u32 msg_enable;
        struct c2_dev *c2dev;
        struct net_device *netdev;

        spinlock_t tx_lock;
        u32 tx_avail;
        struct c2_ring tx_ring;
        struct c2_ring rx_ring;

        void *mem;              /* PCI memory for host rings */
        dma_addr_t dma;
        unsigned long mem_size;

        u32 rx_buf_size;
};

/*
 * Activity FIFO registers in BAR0.
 */
#define PCI_BAR0_HOST_HINT      0x100
#define PCI_BAR0_ADAPTER_HINT   0x2000
/*
 * CQ arming flags.
 */
#define CQ_ARMED        0x01
#define CQ_WAIT_FOR_DMA 0x80

/*
 * The format of a hint is as follows:
 * The lower 16 bits are the count of hints for the queue.
 * The next 15 bits are the qp_index.
 * The uppermost bit depends on who reads it:
 *    If read by the producer, it means Full (1) or Not-Full (0).
 *    If read by the consumer, it means Empty (1) or Not-Empty (0).
 */
#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | (hint_count))
#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
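
/*
 * Worked example (values illustrative): for qp_index 5 with a hint count
 * of 3,
 *
 *      u32 hint = C2_HINT_MAKE(5, 3);          // == 0x00050003
 *      u16 idx  = C2_HINT_GET_INDEX(hint);     // == 5
 *      u16 cnt  = C2_HINT_GET_COUNT(hint);     // == 3
 *
 * Bit 31 (the full/empty flag) is never set by C2_HINT_MAKE and is masked
 * off by C2_HINT_GET_INDEX.
 */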

/*
 * The following defines the offset in SDRAM of the
 * struct c2_adapter_pci_regs.
 */
#define C2_ADAPTER_PCI_REGS_OFFSET 0x10000

#ifndef readq
static inline u64 readq(const void __iomem *addr)
{
        u64 ret = readl(addr + 4);
        ret <<= 32;
        ret |= readl(addr);

        return ret;
}
#endif

#ifndef writeq
static inline void __raw_writeq(u64 val, void __iomem *addr)
{
        __raw_writel((u32) (val), addr);
        __raw_writel((u32) (val >> 32), (addr + 4));
}
#endif
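
/*
 * The fallbacks above are compiled only on platforms whose asm/io.h lacks
 * native 64-bit MMIO accessors; they split each access into two 32-bit
 * halves (low word at addr, high word at addr + 4) and are therefore not
 * atomic with respect to the device.
 */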

#define C2_SET_CUR_RX(c2dev, cur_rx) \
        __raw_writel((__force u32) cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)

#define C2_GET_CUR_RX(c2dev) \
        be32_to_cpu((__force __be32) readl(c2dev->mmio_txp_ring + 4092))
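
/*
 * cur_rx is kept in the last 32-bit word (offset 4092) of the 4 KB TXP
 * ring window and is exchanged big-endian, hence the cpu_to_be32 and
 * be32_to_cpu conversions around the raw MMIO accesses.
 */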

static inline struct c2_dev *to_c2dev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct c2_dev, ibdev);
}

static inline int c2_errno(void *reply)
{
        switch (c2_wr_get_result(reply)) {
        case C2_OK:
                return 0;
        case CCERR_NO_BUFS:
        case CCERR_INSUFFICIENT_RESOURCES:
        case CCERR_ZERO_RDMA_READ_RESOURCES:
                return -ENOMEM;
        case CCERR_MR_IN_USE:
        case CCERR_QP_IN_USE:
                return -EBUSY;
        case CCERR_ADDR_IN_USE:
                return -EADDRINUSE;
        case CCERR_ADDR_NOT_AVAIL:
                return -EADDRNOTAVAIL;
        case CCERR_CONN_RESET:
                return -ECONNRESET;
        case CCERR_NOT_IMPLEMENTED:
        case CCERR_INVALID_WQE:
                return -ENOSYS;
        case CCERR_QP_NOT_PRIVILEGED:
                return -EPERM;
        case CCERR_STACK_ERROR:
                return -EPROTO;
        case CCERR_ACCESS_VIOLATION:
        case CCERR_BASE_AND_BOUNDS_VIOLATION:
                return -EFAULT;
        case CCERR_STAG_STATE_NOT_INVALID:
        case CCERR_INVALID_ADDRESS:
        case CCERR_INVALID_CQ:
        case CCERR_INVALID_EP:
        case CCERR_INVALID_MODIFIER:
        case CCERR_INVALID_MTU:
        case CCERR_INVALID_PD_ID:
        case CCERR_INVALID_QP:
        case CCERR_INVALID_RNIC:
        case CCERR_INVALID_STAG:
                return -EINVAL;
        default:
                return -EAGAIN;
        }
}
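
/*
 * Typical use (sketch): verbs paths pass the reply message pulled from
 * the reply MQ and propagate the translated errno, e.g.
 *
 *      err = c2_errno(reply);
 *      if (err)
 *              goto bail;
 */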

/* Device */
extern int c2_register_device(struct c2_dev *c2dev);
extern void c2_unregister_device(struct c2_dev *c2dev);
extern int c2_rnic_init(struct c2_dev *c2dev);
extern void c2_rnic_term(struct c2_dev *c2dev);
extern void c2_rnic_interrupt(struct c2_dev *c2dev);
extern int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
extern int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);

/* QPs */
extern int c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd,
                       struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp);
extern void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp);
extern struct ib_qp *c2_get_qp(struct ib_device *device, int qpn);
extern int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
                        struct ib_qp_attr *attr, int attr_mask);
extern int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
                                 int ord, int ird);
extern int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
                        struct ib_send_wr **bad_wr);
extern int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
                           struct ib_recv_wr **bad_wr);
extern void __devinit c2_init_qp_table(struct c2_dev *c2dev);
extern void __devexit c2_cleanup_qp_table(struct c2_dev *c2dev);
extern void c2_set_qp_state(struct c2_qp *, int);
extern struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn);

/* PDs */
extern int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd);
extern void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd);
extern int __devinit c2_init_pd_table(struct c2_dev *c2dev);
extern void __devexit c2_cleanup_pd_table(struct c2_dev *c2dev);

/* CQs */
extern int c2_init_cq(struct c2_dev *c2dev, int entries,
                      struct c2_ucontext *ctx, struct c2_cq *cq);
extern void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq);
extern void c2_cq_event(struct c2_dev *c2dev, u32 mq_index);
extern void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
extern int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);

/* CM */
extern int c2_llp_connect(struct iw_cm_id *cm_id,
                          struct iw_cm_conn_param *iw_param);
extern int c2_llp_accept(struct iw_cm_id *cm_id,
                         struct iw_cm_conn_param *iw_param);
extern int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata,
                         u8 pdata_len);
extern int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog);
extern int c2_llp_service_destroy(struct iw_cm_id *cm_id);

/* MM */
extern int c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
                                      int page_size, int pbl_depth, u32 length,
                                      u32 off, u64 *va, enum c2_acf acf,
                                      struct c2_mr *mr);
extern int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index);

/* AE */
extern void c2_ae_event(struct c2_dev *c2dev, u32 mq_index);

/* MQSP Allocator */
extern int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
                             struct sp_chunk **root);
extern void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root);
extern __be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
                             dma_addr_t *dma_addr, gfp_t gfp_mask);
extern void c2_free_mqsp(__be16 *mqsp);
#endif  /* __C2_H */