linux/drivers/misc/genwqe/card_base.h
#ifndef __CARD_BASE_H__
#define __CARD_BASE_H__

/**
 * IBM Accelerator Family 'GenWQE'
 *
 * (C) Copyright IBM Corp. 2013
 *
 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
 * Author: Michael Jung <mijung@de.ibm.com>
 * Author: Michael Ruettger <michael@ibmra.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Interfaces within the GenWQE module. Defines genwqe_dev and
 * ddcb_queue as well as ddcb_requ.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/cdev.h>
#include <linux/stringify.h>
#include <linux/pci.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/version.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include <linux/genwqe/genwqe_card.h>
#include "genwqe_driver.h"

#define GENWQE_MSI_IRQS                 4  /* Just one supported, no MSIx */
#define GENWQE_FLAG_MSI_ENABLED         (1 << 0)

#define GENWQE_MAX_VFS                  15 /* maximum 15 VFs are possible */
#define GENWQE_MAX_FUNCS                16 /* 1 PF and 15 VFs */
#define GENWQE_CARD_NO_MAX              (16 * GENWQE_MAX_FUNCS)

/* Compile parameters, some of them appear in debugfs for later adjustment */
#define genwqe_ddcb_max                 32 /* DDCBs on the work-queue */
#define genwqe_polling_enabled          0  /* in case of irqs not working */
#define genwqe_ddcb_software_timeout    10 /* timeout per DDCB in seconds */
#define genwqe_kill_timeout             8  /* time until process gets killed */
#define genwqe_vf_jobtimeout_msec       250  /* 250 msec */
#define genwqe_pf_jobtimeout_msec       8000 /* 8 sec should be ok */
#define genwqe_health_check_interval    4 /* <= 0: disabled */

/* Sysfs attribute groups used when we create the genwqe device */
extern const struct attribute_group *genwqe_attribute_groups[];

/*
 * Config space for Genwqe5 A7:
 * 00:[14 10 4b 04]40 00 10 00[00 00 00 12]00 00 00 00
 * 10: 0c 00 00 f0 07 3c 00 00 00 00 00 00 00 00 00 00
 * 20: 00 00 00 00 00 00 00 00 00 00 00 00[14 10 4b 04]
 * 30: 00 00 00 00 50 00 00 00 00 00 00 00 00 00 00 00
 */
#define PCI_DEVICE_GENWQE               0x044b /* Genwqe DeviceID */

#define PCI_SUBSYSTEM_ID_GENWQE5        0x035f /* Genwqe A5 Subsystem-ID */
#define PCI_SUBSYSTEM_ID_GENWQE5_NEW    0x044b /* Genwqe A5 Subsystem-ID */
#define PCI_CLASSCODE_GENWQE5           0x1200 /* UNKNOWN */

#define PCI_SUBVENDOR_ID_IBM_SRIOV      0x0000
#define PCI_SUBSYSTEM_ID_GENWQE5_SRIOV  0x0000 /* Genwqe A5 Subsystem-ID */
#define PCI_CLASSCODE_GENWQE5_SRIOV     0x1200 /* UNKNOWN */

#define GENWQE_SLU_ARCH_REQ             2 /* Required SLU architecture level */

/**
 * struct genwqe_reg - Genwqe data dump functionality
 */
struct genwqe_reg {
        u32 addr;
        u32 idx;
        u64 val;
};

/*
 * enum genwqe_dbg_type - Specify chip unit to dump/debug
 */
enum genwqe_dbg_type {
        GENWQE_DBG_UNIT0 = 0,  /* captured before prev errs cleared */
        GENWQE_DBG_UNIT1 = 1,
        GENWQE_DBG_UNIT2 = 2,
        GENWQE_DBG_UNIT3 = 3,
        GENWQE_DBG_UNIT4 = 4,
        GENWQE_DBG_UNIT5 = 5,
        GENWQE_DBG_UNIT6 = 6,
        GENWQE_DBG_UNIT7 = 7,
        GENWQE_DBG_REGS  = 8,
        GENWQE_DBG_DMA   = 9,
        GENWQE_DBG_UNITS = 10, /* max number of possible debug units  */
};

/* Software error injection to simulate card failures */
#define GENWQE_INJECT_HARDWARE_FAILURE  0x00000001 /* injects -1 reg reads */
#define GENWQE_INJECT_BUS_RESET_FAILURE 0x00000002 /* pci_bus_reset fail */
#define GENWQE_INJECT_GFIR_FATAL        0x00000004 /* GFIR = 0x0000ffff */
#define GENWQE_INJECT_GFIR_INFO         0x00000008 /* GFIR = 0xffff0000 */

/*
 * Genwqe card description and management data.
 *
 * Error-handling in case of card malfunction
 * ------------------------------------------
 *
 * If the card is detected to be defective, the outside environment
 * will cause the PCI layer to call deinit (the cleanup function for
 * probe). This has the same effect as doing an unbind/bind operation
 * on the card.
 *
 * The genwqe card driver implements a health checking thread which
 * verifies the card function. If it detects a problem, the card's
 * device is shut down and restarted again, along with a reset of the
 * card and queue.
 *
 * All functions accessing the card device return either -EIO or -ENODEV
 * to indicate the malfunction to the user. The user has to close the
 * file descriptor and open a new one once the card becomes available
 * again.
 *
 * If the open file descriptor is set up to receive SIGIO, the signal is
 * generated for the application, which has to provide a handler to
 * react to it. If the application does not close the open file
 * descriptor, a SIGKILL is sent to enforce freeing the card's
 * resources.
 *
 * I did not find a different way to prevent kernel problems due to
 * reference counters for the card's character devices getting out of
 * sync. The character device deallocation does not block, even if
 * there is still an open file descriptor pending. If this pending
 * descriptor is closed, the data structures used by the character
 * device are reinstantiated, which will lead to the reference counter
 * dropping below the allowed values.
 *
 * Card recovery
 * -------------
 *
 * To test the internal driver recovery the following command can be used:
 *   sudo sh -c 'echo 0xfffff > /sys/class/genwqe/genwqe0_card/err_inject'
 */
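
/*
 * A hedged example, assuming the err_inject attribute shown above simply
 * stores the written value as a bitmask of the GENWQE_INJECT_* bits
 * defined earlier (the attribute implementation lives outside this
 * header). Injecting only a fatal GFIR could then look like this:
 *
 *   sudo sh -c 'echo 0x4 > /sys/class/genwqe/genwqe0_card/err_inject'
 *
 * where 0x4 corresponds to GENWQE_INJECT_GFIR_FATAL and should exercise
 * the recovery path without a real hardware defect.
 */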


/**
 * enum dma_mapping_type - Mapping type definition
 *
 * To avoid memcpying data around we use user memory directly. To do
 * this we need to pin/swap-in the memory and request a DMA address
 * for it.
 */
enum dma_mapping_type {
        GENWQE_MAPPING_RAW = 0,         /* contiguous memory buffer */
        GENWQE_MAPPING_SGL_TEMP,        /* sglist dynamically used */
        GENWQE_MAPPING_SGL_PINNED,      /* sglist used with pinning */
};

/**
 * struct dma_mapping - Information about memory mappings done by the driver
 */
struct dma_mapping {
        enum dma_mapping_type type;

        void *u_vaddr;                  /* user-space vaddr/non-aligned */
        void *k_vaddr;                  /* kernel-space vaddr/non-aligned */
        dma_addr_t dma_addr;            /* physical DMA address */

        struct page **page_list;        /* list of pages used by user buff */
        dma_addr_t *dma_list;           /* list of dma addresses per page */
        unsigned int nr_pages;          /* number of pages */
        unsigned int size;              /* size in bytes */

        struct list_head card_list;     /* list of usr_maps for card */
        struct list_head pin_list;      /* list of pinned memory for dev */
};

static inline void genwqe_mapping_init(struct dma_mapping *m,
                                       enum dma_mapping_type type)
{
        memset(m, 0, sizeof(*m));
        m->type = type;
}
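
/*
 * Usage sketch (illustrative only, not part of the driver API): a raw
 * user-space buffer is typically mapped by initializing a struct
 * dma_mapping and handing it to genwqe_user_vmap(), declared further
 * below; genwqe_user_vunmap() releases it again. Error handling is
 * omitted for brevity.
 *
 *   struct dma_mapping m;
 *   int rc;
 *
 *   genwqe_mapping_init(&m, GENWQE_MAPPING_RAW);
 *   rc = genwqe_user_vmap(cd, &m, uaddr, size, req);
 *   if (rc == 0) {
 *           ... use m.dma_addr / m.dma_list in the DDCB ...
 *           genwqe_user_vunmap(cd, &m, req);
 *   }
 */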

/**
 * struct ddcb_queue - DDCB queue data
 * @ddcb_max:          Number of DDCBs on the queue
 * @ddcb_next:         Next free DDCB
 * @ddcb_act:          Next DDCB supposed to finish
 * @ddcb_seq:          Sequence number of last DDCB
 * @ddcbs_in_flight:   Currently enqueued DDCBs
 * @ddcbs_completed:   Number of already completed DDCBs
 * @busy:              Number of -EBUSY returns
 * @ddcb_daddr:        DMA address of first DDCB in the queue
 * @ddcb_vaddr:        Kernel virtual address of first DDCB in the queue
 * @ddcb_req:          Associated requests (one per DDCB)
 * @ddcb_waitqs:       Associated wait queues (one per DDCB)
 * @ddcb_lock:         Lock to protect queuing operations
 * @ddcb_waitq:        Wait on next DDCB finishing
 */
struct ddcb_queue {
        int ddcb_max;                   /* amount of DDCBs  */
        int ddcb_next;                  /* next available DDCB num */
        int ddcb_act;                   /* DDCB to be processed */
        u16 ddcb_seq;                   /* slc seq num */
        unsigned int ddcbs_in_flight;   /* number of ddcbs in processing */
        unsigned int ddcbs_completed;
        unsigned int ddcbs_max_in_flight;
        unsigned int busy;              /* how many times -EBUSY? */

        dma_addr_t ddcb_daddr;          /* DMA address */
        struct ddcb *ddcb_vaddr;        /* kernel virtual addr for DDCBs */
        struct ddcb_requ **ddcb_req;    /* ddcb processing parameter */
        wait_queue_head_t *ddcb_waitqs; /* waitqueue per ddcb */

        spinlock_t ddcb_lock;           /* exclusive access to queue */
        wait_queue_head_t ddcb_waitq;   /* wait for ddcb processing */

        /* registers of the respective queue to be used */
        u32 IO_QUEUE_CONFIG;
        u32 IO_QUEUE_STATUS;
        u32 IO_QUEUE_SEGMENT;
        u32 IO_QUEUE_INITSQN;
        u32 IO_QUEUE_WRAP;
        u32 IO_QUEUE_OFFSET;
        u32 IO_QUEUE_WTIME;
        u32 IO_QUEUE_ERRCNTS;
        u32 IO_QUEUE_LRW;
};

/*
 * GFIR, SLU_UNITCFG, APP_UNITCFG
 *   8 Units with FIR/FEC + 64 * 2ndary FIRS/FEC.
 */
#define GENWQE_FFDC_REGS        (3 + (8 * (2 + 2 * 64)))

struct genwqe_ffdc {
        unsigned int entries;
        struct genwqe_reg *regs;
};

/**
 * struct genwqe_dev - GenWQE device information
 * @card_state:       Card operation state, see above
 * @ffdc:             First Failure Data Capture buffers for each unit
 * @card_thread:      Working thread to operate the DDCB queue
 * @queue_waitq:      Wait queue used in card_thread
 * @queue:            DDCB queue
 * @health_thread:    Card monitoring thread (only for PFs)
 * @health_waitq:     Wait queue used in health_thread
 * @pci_dev:          Associated PCI device (function)
 * @mmio:             Base address of 64-bit register space
 * @mmio_len:         Length of register area
 * @file_lock:        Lock to protect access to file_list
 * @file_list:        List of all processes with open GenWQE file descriptors
 *
 * This struct contains all information needed to communicate with a
 * GenWQE card. It is initialized when a GenWQE device is found and
 * destroyed when it goes away. It holds data to maintain the queue as
 * well as data needed to feed the user interfaces.
 */
struct genwqe_dev {
        enum genwqe_card_state card_state;
        spinlock_t print_lock;

        int card_idx;                   /* card index 0..CARD_NO_MAX-1 */
        u64 flags;                      /* general flags */

        /* FFDC data gathering */
        struct genwqe_ffdc ffdc[GENWQE_DBG_UNITS];

        /* DDCB workqueue */
        struct task_struct *card_thread;
        wait_queue_head_t queue_waitq;
        struct ddcb_queue queue;        /* genwqe DDCB queue */
        unsigned int irqs_processed;

        /* Card health checking thread */
        struct task_struct *health_thread;
        wait_queue_head_t health_waitq;

        /* char device */
        dev_t  devnum_genwqe;           /* major/minor num card */
        struct class *class_genwqe;     /* reference to class object */
        struct device *dev;             /* for device creation */
        struct cdev cdev_genwqe;        /* char device for card */

        struct dentry *debugfs_root;    /* debugfs card root directory */
        struct dentry *debugfs_genwqe;  /* debugfs driver root directory */

        /* pci resources */
        struct pci_dev *pci_dev;        /* PCI device */
        void __iomem *mmio;             /* BAR-0 MMIO start */
        unsigned long mmio_len;
        u16 num_vfs;
        u32 vf_jobtimeout_msec[GENWQE_MAX_VFS];
        int is_privileged;              /* access to all regs possible */

        /* config regs which we need often */
        u64 slu_unitcfg;
        u64 app_unitcfg;
        u64 softreset;
        u64 err_inject;
        u64 last_gfir;
        char app_name[5];

        spinlock_t file_lock;           /* lock for open files */
        struct list_head file_list;     /* list of open files */

        /* debugfs parameters */
        int ddcb_software_timeout;      /* wait until DDCB times out */
        int skip_recovery;              /* circumvention if recovery fails */
        int kill_timeout;               /* wait after sending SIGKILL */
};

/**
 * enum genwqe_requ_state - State of a DDCB execution request
 */
enum genwqe_requ_state {
        GENWQE_REQU_NEW      = 0,
        GENWQE_REQU_ENQUEUED = 1,
        GENWQE_REQU_TAPPED   = 2,
        GENWQE_REQU_FINISHED = 3,
        GENWQE_REQU_STATE_MAX,
};

/**
 * struct genwqe_sgl - Scatter gather list describing user-space memory
 * @sgl:            scatter gather list needs to be 128 byte aligned
 * @sgl_dma_addr:   dma address of sgl
 * @sgl_size:       size of area used for sgl
 * @user_addr:      user-space address of memory area
 * @user_size:      size of user-space memory area
 * @fpage:          buffer for the partial first page if needed
 * @fpage_dma_addr: dma address of the partial first page
 * @lpage:          buffer for the partial last page if needed
 * @lpage_dma_addr: dma address of the partial last page
 */
struct genwqe_sgl {
        dma_addr_t sgl_dma_addr;
        struct sg_entry *sgl;
        size_t sgl_size;        /* size of sgl */

        void __user *user_addr; /* user-space base-address */
        size_t user_size;       /* size of memory area */

        unsigned long nr_pages;
        unsigned long fpage_offs;
        size_t fpage_size;
        size_t lpage_size;

        void *fpage;
        dma_addr_t fpage_dma_addr;

        void *lpage;
        dma_addr_t lpage_dma_addr;
};

int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
                          void __user *user_addr, size_t user_size);

int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
                     dma_addr_t *dma_list);

int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl);
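
/*
 * Call-order sketch for the scatter gather helpers above (illustrative
 * only, error handling omitted): genwqe_alloc_sync_sgl() prepares the
 * sgl for a user buffer, genwqe_setup_sgl() fills it from the per-page
 * dma_list of a mapping, and genwqe_free_sync_sgl() releases it again
 * (the "sync" pair also takes care of the partial first/last page
 * buffers).
 *
 *   struct genwqe_sgl sgl;
 *
 *   rc = genwqe_alloc_sync_sgl(cd, &sgl, user_addr, user_size);
 *   if (rc == 0) {
 *           genwqe_setup_sgl(cd, &sgl, dma_list);
 *           ... hand sgl.sgl_dma_addr to the hardware ...
 *           genwqe_free_sync_sgl(cd, &sgl);
 *   }
 */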

/**
 * struct ddcb_requ - Kernel internal representation of the DDCB request
 * @cmd:          User space representation of the DDCB execution request
 */
struct ddcb_requ {
        /* kernel specific content */
        enum genwqe_requ_state req_state; /* request status */
        int num;                          /* ddcb_no for this request */
        struct ddcb_queue *queue;         /* associated queue */

        struct dma_mapping  dma_mappings[DDCB_FIXUPS];
        struct genwqe_sgl sgls[DDCB_FIXUPS];

        /* kernel/user shared content */
        struct genwqe_ddcb_cmd cmd;     /* user-space DDCB request */
        struct genwqe_debug_data debug_data;
};

/**
 * struct genwqe_file - Information for open GenWQE devices
 */
struct genwqe_file {
        struct genwqe_dev *cd;
        struct genwqe_driver *client;
        struct file *filp;

        struct fasync_struct *async_queue;
        struct task_struct *owner;
        struct list_head list;          /* entry in list of open files */

        spinlock_t map_lock;            /* lock for dma_mappings */
        struct list_head map_list;      /* list of dma_mappings */

        spinlock_t pin_lock;            /* lock for pinned memory */
        struct list_head pin_list;      /* list of pinned memory */
};

int  genwqe_setup_service_layer(struct genwqe_dev *cd); /* for PF only */
int  genwqe_finish_queue(struct genwqe_dev *cd);
int  genwqe_release_service_layer(struct genwqe_dev *cd);

/**
 * genwqe_get_slu_id() - Read Service Layer Unit Id
 * Return: 0x00: Development code
 *         0x01: SLC1 (old)
 *         0x02: SLC2 (sept2012)
 *         0x03: SLC2 (feb2013, generic driver)
 */
static inline int genwqe_get_slu_id(struct genwqe_dev *cd)
{
        return (int)((cd->slu_unitcfg >> 32) & 0xff);
}

int  genwqe_ddcbs_in_flight(struct genwqe_dev *cd);

u8   genwqe_card_type(struct genwqe_dev *cd);
int  genwqe_card_reset(struct genwqe_dev *cd);
int  genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count);
void genwqe_reset_interrupt_capability(struct genwqe_dev *cd);

int  genwqe_device_create(struct genwqe_dev *cd);
int  genwqe_device_remove(struct genwqe_dev *cd);

/* debugfs */
int  genwqe_init_debugfs(struct genwqe_dev *cd);
void genqwe_exit_debugfs(struct genwqe_dev *cd);

int  genwqe_read_softreset(struct genwqe_dev *cd);

/* Hardware Circumventions */
int  genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd);
int  genwqe_flash_readback_fails(struct genwqe_dev *cd);

/**
 * genwqe_write_vreg() - Write register in VF window
 * @cd:    genwqe device
 * @reg:   register address
 * @val:   value to write
 * @func:  0: PF, 1: VF0, ..., 15: VF14
 */
int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func);

/**
 * genwqe_read_vreg() - Read register in VF window
 * @cd:    genwqe device
 * @reg:   register address
 * @func:  0: PF, 1: VF0, ..., 15: VF14
 *
 * Return: content of the register
 */
u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func);
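
/*
 * Sketch (illustrative only): 'reg' is a register byte offset and
 * 'func' selects which function's window is accessed, 0 for the PF and
 * 1..15 for VF0..VF14. Writing a value for VF0 and reading it back
 * could look like this:
 *
 *   genwqe_write_vreg(cd, reg, val, 1);
 *   val = genwqe_read_vreg(cd, reg, 1);
 */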

/* FFDC Buffer Management */
int  genwqe_ffdc_buff_size(struct genwqe_dev *cd, int unit_id);
int  genwqe_ffdc_buff_read(struct genwqe_dev *cd, int unit_id,
                           struct genwqe_reg *regs, unsigned int max_regs);
int  genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
                           unsigned int max_regs, int all);
int  genwqe_ffdc_dump_dma(struct genwqe_dev *cd,
                          struct genwqe_reg *regs, unsigned int max_regs);

int  genwqe_init_debug_data(struct genwqe_dev *cd,
                            struct genwqe_debug_data *d);

void genwqe_init_crc32(void);
int  genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len);

/* Memory allocation/deallocation; dma address handling */
int  genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m,
                      void *uaddr, unsigned long size,
                      struct ddcb_requ *req);

int  genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
                        struct ddcb_requ *req);

static inline bool dma_mapping_used(struct dma_mapping *m)
{
        if (!m)
                return false;
        return m->size != 0;
}
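
/*
 * Sketch (illustrative only): dma_mapping_used() is a small guard to
 * decide whether a mapping slot, e.g. one of the dma_mappings[] of a
 * struct ddcb_requ, still needs to be torn down.
 *
 *   if (dma_mapping_used(&req->dma_mappings[i]))
 *           genwqe_user_vunmap(cd, &req->dma_mappings[i], req);
 */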

/**
 * __genwqe_execute_ddcb() - Execute DDCB request with addr translation
 *
 * This function will do the address translation changes to the DDCBs
 * according to the definitions required by the ATS field. It looks up
 * the memory allocation buffer or does vmap/vunmap for the respective
 * user-space buffers, including page pinning and scatter gather list
 * buildup and teardown.
 */
int  __genwqe_execute_ddcb(struct genwqe_dev *cd,
                           struct genwqe_ddcb_cmd *cmd);

/**
 * __genwqe_execute_raw_ddcb() - Execute DDCB request without addr translation
 *
 * This version will not do address translation or any modification of
 * the DDCB data. It is used e.g. for the MoveFlash DDCB which is
 * entirely prepared by the driver itself. That means the appropriate
 * DMA addresses are already in the DDCB and do not need any
 * modification.
 */
int  __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
                               struct genwqe_ddcb_cmd *cmd);

int  __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
int  __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
int  __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
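
/*
 * Sketch of the low-level flow behind the execute helpers above
 * (illustrative only, locking and retries omitted): a request is
 * enqueued, the caller waits for its completion, and on failure or
 * timeout the DDCB is purged from the queue again.
 *
 *   rc = __genwqe_enqueue_ddcb(cd, req);
 *   if (rc == 0) {
 *           rc = __genwqe_wait_ddcb(cd, req);
 *           if (rc < 0)
 *                   __genwqe_purge_ddcb(cd, req);
 *   }
 */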

/* register access */
int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val);
u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs);
int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val);
u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs);
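
/*
 * Sketch (illustrative only): the accessors above take a byte offset
 * into the BAR-0 MMIO space, e.g. the IO_SLU_BITSTREAM register used in
 * genwqe_is_privileged() further below; a read returning
 * IO_ILLEGAL_VALUE indicates that the register is not accessible.
 *
 *   if (__genwqe_readq(cd, IO_SLU_BITSTREAM) == IO_ILLEGAL_VALUE)
 *           dev_warn(&cd->pci_dev->dev, "no privileged register access\n");
 */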

void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
                                 dma_addr_t *dma_handle);
void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
                              void *vaddr, dma_addr_t dma_handle);
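
/*
 * Sketch (illustrative only): the consistent-memory wrappers above are
 * what the DDCB queue memory itself would typically be allocated with;
 * the hypothetical queue_size below stands for something like
 * ddcb_max * sizeof(struct ddcb) and is not a define from this header.
 *
 *   queue->ddcb_vaddr = __genwqe_alloc_consistent(cd, queue_size,
 *                                                 &queue->ddcb_daddr);
 *   ...
 *   __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr,
 *                            queue->ddcb_daddr);
 */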

/* Base clock frequency in MHz */
int  genwqe_base_clock_frequency(struct genwqe_dev *cd);

/* Before FFDC is captured the traps should be stopped. */
void genwqe_stop_traps(struct genwqe_dev *cd);
void genwqe_start_traps(struct genwqe_dev *cd);

/* Hardware circumvention */
bool genwqe_need_err_masking(struct genwqe_dev *cd);

/**
 * genwqe_is_privileged() - Determine operation mode for PCI function
 *
 * On Intel with SRIOV support we see:
 *   PF: is_physfn = 1 is_virtfn = 0
 *   VF: is_physfn = 0 is_virtfn = 1
 *
 * On Systems with no SRIOV support _and_ virtualized systems we get:
 *       is_physfn = 0 is_virtfn = 0
 *
 * Other vendors have individual pci device ids to distinguish between
 * virtual function drivers and physical function drivers. GenWQE
 * unfortunately has just one pci device id for both VFs and PF.
 *
 * The following code is used to distinguish if the card is running in
 * privileged mode, either as true PF or in a virtualized system with
 * full register access e.g. currently on PowerPC.
 *
 * if (pci_dev->is_virtfn)
 *          cd->is_privileged = 0;
 *  else
 *          cd->is_privileged = (__genwqe_readq(cd, IO_SLU_BITSTREAM)
 *                               != IO_ILLEGAL_VALUE);
 */
static inline int genwqe_is_privileged(struct genwqe_dev *cd)
{
        return cd->is_privileged;
}

#endif  /* __CARD_BASE_H__ */