linux/drivers/misc/hpilo.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the HP iLO management processor.
 *
 * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
 *	David Altobelli <david.altobelli@hpe.com>
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include "hpilo.h"

static struct class *ilo_class;
static unsigned int ilo_major;
static unsigned int max_ccb = 16;
static char ilo_hwdev[MAX_ILO_DEV];
static const struct pci_device_id ilo_blacklist[] = {
	/* auxiliary iLO */
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_HP, 0x3307, PCI_VENDOR_ID_HP, 0x1979)},
	/* CL */
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_HP, 0x3307, PCI_VENDOR_ID_HP_3PAR, 0x0289)},
	{}
};

static inline int get_entry_id(int entry)
{
	return (entry & ENTRY_MASK_DESCRIPTOR) >> ENTRY_BITPOS_DESCRIPTOR;
}

static inline int get_entry_len(int entry)
{
	return ((entry & ENTRY_MASK_QWORDS) >> ENTRY_BITPOS_QWORDS) << 3;
}

static inline int mk_entry(int id, int len)
{
	int qlen = len & 7 ? (len >> 3) + 1 : len >> 3;
	return id << ENTRY_BITPOS_DESCRIPTOR | qlen << ENTRY_BITPOS_QWORDS;
}

static inline int desc_mem_sz(int nr_entry)
{
	return nr_entry << L2_QENTRY_SZ;
}
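
/*
 * Worked example of the entry encoding above (a sketch; the exact
 * ENTRY_BITPOS_* / ENTRY_MASK_* values live in hpilo.h and are assumed,
 * not shown here):
 *
 *	entry = mk_entry(3, 20);
 *		20 bytes is not a multiple of 8, so qlen = (20 >> 3) + 1 = 3
 *	get_entry_id(entry)  == 3
 *	get_entry_len(entry) == 3 << 3 == 24
 *
 * Lengths are stored in qwords, so a decoded length is the byte length
 * rounded up to the next multiple of 8.  desc_mem_sz(n) gives the byte
 * offset of descriptor n, each descriptor being 1 << L2_QENTRY_SZ bytes.
 */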

/*
 * FIFO queues, shared with hardware.
 *
 * If a queue has empty slots, an entry is added to the queue tail,
 * and that entry is marked as occupied.
 * Entries can be dequeued from the head of the list, when the device
 * has marked the entry as consumed.
 *
 * Returns true on successful queue/dequeue, false on failure.
 */
static int fifo_enqueue(struct ilo_hwinfo *hw, char *fifobar, int entry)
{
	struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hw->fifo_lock, flags);
	if (!(fifo_q->fifobar[(fifo_q->tail + 1) & fifo_q->imask]
	      & ENTRY_MASK_O)) {
		fifo_q->fifobar[fifo_q->tail & fifo_q->imask] |=
				(entry & ENTRY_MASK_NOSTATE) | fifo_q->merge;
		fifo_q->tail += 1;
		ret = 1;
	}
	spin_unlock_irqrestore(&hw->fifo_lock, flags);

	return ret;
}

static int fifo_dequeue(struct ilo_hwinfo *hw, char *fifobar, int *entry)
{
	struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
	unsigned long flags;
	int ret = 0;
	u64 c;

	spin_lock_irqsave(&hw->fifo_lock, flags);
	c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
	if (c & ENTRY_MASK_C) {
		if (entry)
			*entry = c & ENTRY_MASK_NOSTATE;

		fifo_q->fifobar[fifo_q->head & fifo_q->imask] =
							(c | ENTRY_MASK) + 1;
		fifo_q->head += 1;
		ret = 1;
	}
	spin_unlock_irqrestore(&hw->fifo_lock, flags);

	return ret;
}

static int fifo_check_recv(struct ilo_hwinfo *hw, char *fifobar)
{
	struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
	unsigned long flags;
	int ret = 0;
	u64 c;

	spin_lock_irqsave(&hw->fifo_lock, flags);
	c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
	if (c & ENTRY_MASK_C)
		ret = 1;
	spin_unlock_irqrestore(&hw->fifo_lock, flags);

	return ret;
}
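
/*
 * Lifecycle of a single fifo slot, as implied by the routines above
 * (an illustrative trace using the hpilo.h bit names, not additional
 * protocol documentation):
 *
 *	fifo_enqueue():  the slot after tail is still free (O clear), so
 *			 write (entry & ENTRY_MASK_NOSTATE) | ENTRY_MASK_O
 *			 at tail and advance tail
 *	hardware:	 consumes the entry and sets ENTRY_MASK_C
 *	fifo_dequeue():  the slot at head has C set, so hand the entry
 *			 back to the caller, reset the slot's state and
 *			 advance head
 *
 * fifo_check_recv() is the non-destructive variant: it only tests the C
 * bit at the head, which is what ilo_poll() ultimately relies on.
 */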

static int ilo_pkt_enqueue(struct ilo_hwinfo *hw, struct ccb *ccb,
			   int dir, int id, int len)
{
	char *fifobar;
	int entry;

	if (dir == SENDQ)
		fifobar = ccb->ccb_u1.send_fifobar;
	else
		fifobar = ccb->ccb_u3.recv_fifobar;

	entry = mk_entry(id, len);
	return fifo_enqueue(hw, fifobar, entry);
}

static int ilo_pkt_dequeue(struct ilo_hwinfo *hw, struct ccb *ccb,
			   int dir, int *id, int *len, void **pkt)
{
	char *fifobar, *desc;
	int entry = 0, pkt_id = 0;
	int ret;

	if (dir == SENDQ) {
		fifobar = ccb->ccb_u1.send_fifobar;
		desc = ccb->ccb_u2.send_desc;
	} else {
		fifobar = ccb->ccb_u3.recv_fifobar;
		desc = ccb->ccb_u4.recv_desc;
	}

	ret = fifo_dequeue(hw, fifobar, &entry);
	if (ret) {
		pkt_id = get_entry_id(entry);
		if (id)
			*id = pkt_id;
		if (len)
			*len = get_entry_len(entry);
		if (pkt)
			*pkt = (void *)(desc + desc_mem_sz(pkt_id));
	}

	return ret;
}

static int ilo_pkt_recv(struct ilo_hwinfo *hw, struct ccb *ccb)
{
	char *fifobar = ccb->ccb_u3.recv_fifobar;

	return fifo_check_recv(hw, fifobar);
}

static inline void doorbell_set(struct ccb *ccb)
{
	iowrite8(1, ccb->ccb_u5.db_base);
}

static inline void doorbell_clr(struct ccb *ccb)
{
	iowrite8(2, ccb->ccb_u5.db_base);
}

static inline int ctrl_set(int l2sz, int idxmask, int desclim)
{
	int active = 0, go = 1;
	return l2sz << CTRL_BITPOS_L2SZ |
	       idxmask << CTRL_BITPOS_FIFOINDEXMASK |
	       desclim << CTRL_BITPOS_DESCLIMIT |
	       active << CTRL_BITPOS_A |
	       go << CTRL_BITPOS_G;
}

static void ctrl_setup(struct ccb *ccb, int nr_desc, int l2desc_sz)
{
	/* for simplicity, use the same parameters for send and recv ctrls */
	ccb->send_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1);
	ccb->recv_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1);
}

static inline int fifo_sz(int nr_entry)
{
	/* size of a fifo is determined by the number of entries it contains */
	return (nr_entry * sizeof(u64)) + FIFOHANDLESIZE;
}

static void fifo_setup(void *base_addr, int nr_entry)
{
	struct fifo *fifo_q = base_addr;
	int i;

	/* set up an empty fifo */
	fifo_q->head = 0;
	fifo_q->tail = 0;
	fifo_q->reset = 0;
	fifo_q->nrents = nr_entry;
	fifo_q->imask = nr_entry - 1;
	fifo_q->merge = ENTRY_MASK_O;

	for (i = 0; i < nr_entry; i++)
		fifo_q->fifobar[i] = 0;
}
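
/*
 * head and tail above are free-running counters; they are only reduced
 * modulo the queue size at access time via "& fifo_q->imask".  That only
 * works because the entry count is a power of two, so imask = nr_entry - 1
 * is an all-ones mask.  A minimal sketch of the same idiom, outside of any
 * iLO specifics (NR_QENTRY in hpilo.h is assumed to satisfy this):
 *
 *	unsigned int nr_entry = 8, imask = nr_entry - 1;   // 0b0111
 *	unsigned int tail = 9;				   // wrapped once
 *	unsigned int slot = tail & imask;		   // == 1, i.e. 9 % 8
 */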

static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data)
{
	struct ccb *driver_ccb = &data->driver_ccb;
	struct ccb __iomem *device_ccb = data->mapped_ccb;
	int retries;

	/* complicated dance to tell the hw we are stopping */
	doorbell_clr(driver_ccb);
	iowrite32(ioread32(&device_ccb->send_ctrl) & ~(1 << CTRL_BITPOS_G),
		  &device_ccb->send_ctrl);
	iowrite32(ioread32(&device_ccb->recv_ctrl) & ~(1 << CTRL_BITPOS_G),
		  &device_ccb->recv_ctrl);

	/* give iLO some time to process stop request */
	for (retries = MAX_WAIT; retries > 0; retries--) {
		doorbell_set(driver_ccb);
		udelay(WAIT_TIME);
		if (!(ioread32(&device_ccb->send_ctrl) & (1 << CTRL_BITPOS_A))
		    &&
		    !(ioread32(&device_ccb->recv_ctrl) & (1 << CTRL_BITPOS_A)))
			break;
	}
	if (retries == 0)
		dev_err(&pdev->dev, "Closing, but controller still active\n");

	/* clear the hw ccb */
	memset_io(device_ccb, 0, sizeof(struct ccb));

	/* free resources used to back send/recv queues */
	pci_free_consistent(pdev, data->dma_size, data->dma_va, data->dma_pa);
}

static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
{
	char *dma_va;
	dma_addr_t dma_pa;
	struct ccb *driver_ccb, *ilo_ccb;

	driver_ccb = &data->driver_ccb;
	ilo_ccb = &data->ilo_ccb;

	data->dma_size = 2 * fifo_sz(NR_QENTRY) +
			 2 * desc_mem_sz(NR_QENTRY) +
			 ILO_START_ALIGN + ILO_CACHE_SZ;

	data->dma_va = pci_alloc_consistent(hw->ilo_dev, data->dma_size,
					    &data->dma_pa);
	if (!data->dma_va)
		return -ENOMEM;

	dma_va = (char *)data->dma_va;
	dma_pa = data->dma_pa;

	memset(dma_va, 0, data->dma_size);

	dma_va = (char *)roundup((unsigned long)dma_va, ILO_START_ALIGN);
	dma_pa = roundup(dma_pa, ILO_START_ALIGN);

	/*
	 * Create two ccb's, one with virt addrs, one with phys addrs.
	 * Copy the phys addr ccb to device shared mem.
	 */
	ctrl_setup(driver_ccb, NR_QENTRY, L2_QENTRY_SZ);
	ctrl_setup(ilo_ccb, NR_QENTRY, L2_QENTRY_SZ);

	fifo_setup(dma_va, NR_QENTRY);
	driver_ccb->ccb_u1.send_fifobar = dma_va + FIFOHANDLESIZE;
	ilo_ccb->ccb_u1.send_fifobar_pa = dma_pa + FIFOHANDLESIZE;
	dma_va += fifo_sz(NR_QENTRY);
	dma_pa += fifo_sz(NR_QENTRY);

	dma_va = (char *)roundup((unsigned long)dma_va, ILO_CACHE_SZ);
	dma_pa = roundup(dma_pa, ILO_CACHE_SZ);

	fifo_setup(dma_va, NR_QENTRY);
	driver_ccb->ccb_u3.recv_fifobar = dma_va + FIFOHANDLESIZE;
	ilo_ccb->ccb_u3.recv_fifobar_pa = dma_pa + FIFOHANDLESIZE;
	dma_va += fifo_sz(NR_QENTRY);
	dma_pa += fifo_sz(NR_QENTRY);

	driver_ccb->ccb_u2.send_desc = dma_va;
	ilo_ccb->ccb_u2.send_desc_pa = dma_pa;
	dma_pa += desc_mem_sz(NR_QENTRY);
	dma_va += desc_mem_sz(NR_QENTRY);

	driver_ccb->ccb_u4.recv_desc = dma_va;
	ilo_ccb->ccb_u4.recv_desc_pa = dma_pa;

	driver_ccb->channel = slot;
	ilo_ccb->channel = slot;

	driver_ccb->ccb_u5.db_base = hw->db_vaddr + (slot << L2_DB_SIZE);
	ilo_ccb->ccb_u5.db_base = NULL; /* hw ccb's doorbell is not used */

	return 0;
}
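
/*
 * Resulting layout of the coherent DMA block carved up above (a sketch in
 * terms of the hpilo.h constants; alignment padding omitted):
 *
 *	dma_va / dma_pa (aligned to ILO_START_ALIGN)
 *	+--------------------+ fifo handle + NR_QENTRY slots  (send fifo)
 *	| send fifo          |
 *	+--------------------+ re-aligned to ILO_CACHE_SZ
 *	| recv fifo          |
 *	+--------------------+
 *	| send descriptors   | NR_QENTRY * (1 << L2_QENTRY_SZ) bytes
 *	+--------------------+
 *	| recv descriptors   |
 *	+--------------------+
 *
 * driver_ccb holds the kernel virtual addresses of these regions, while
 * ilo_ccb holds the matching bus addresses that ilo_ccb_open() later
 * copies to device shared memory.
 */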

static void ilo_ccb_open(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
{
	int pkt_id, pkt_sz;
	struct ccb *driver_ccb = &data->driver_ccb;

	/* copy the ccb with physical addrs to device memory */
	data->mapped_ccb = (struct ccb __iomem *)
				(hw->ram_vaddr + (slot * ILOHW_CCB_SZ));
	memcpy_toio(data->mapped_ccb, &data->ilo_ccb, sizeof(struct ccb));

	/* put packets on the send and receive queues */
	pkt_sz = 0;
	for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++) {
		ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, pkt_sz);
		doorbell_set(driver_ccb);
	}

	pkt_sz = desc_mem_sz(1);
	for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++)
		ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, pkt_sz);

	/* the ccb is ready to use */
	doorbell_clr(driver_ccb);
}

static int ilo_ccb_verify(struct ilo_hwinfo *hw, struct ccb_data *data)
{
	int pkt_id, i;
	struct ccb *driver_ccb = &data->driver_ccb;

	/* make sure iLO is really handling requests */
	for (i = MAX_WAIT; i > 0; i--) {
		if (ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, NULL, NULL))
			break;
		udelay(WAIT_TIME);
	}

	if (i == 0) {
		dev_err(&hw->ilo_dev->dev, "Open could not dequeue a packet\n");
		return -EBUSY;
	}

	ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, 0);
	doorbell_set(driver_ccb);
	return 0;
}

static inline int is_channel_reset(struct ccb *ccb)
{
	/* check for this particular channel needing a reset */
	return FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset;
}

static inline void set_channel_reset(struct ccb *ccb)
{
	/* set a flag indicating this channel needs a reset */
	FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset = 1;
}

static inline int get_device_outbound(struct ilo_hwinfo *hw)
{
	return ioread32(&hw->mmio_vaddr[DB_OUT]);
}

static inline int is_db_reset(int db_out)
{
	return db_out & (1 << DB_RESET);
}

static inline int is_device_reset(struct ilo_hwinfo *hw)
{
	/* check for global reset condition */
	return is_db_reset(get_device_outbound(hw));
}

static inline void clear_pending_db(struct ilo_hwinfo *hw, int clr)
{
	iowrite32(clr, &hw->mmio_vaddr[DB_OUT]);
}

static inline void clear_device(struct ilo_hwinfo *hw)
{
	/* clear the device (reset bits, pending channel entries) */
	clear_pending_db(hw, -1);
}

static inline void ilo_enable_interrupts(struct ilo_hwinfo *hw)
{
	iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) | 1, &hw->mmio_vaddr[DB_IRQ]);
}

static inline void ilo_disable_interrupts(struct ilo_hwinfo *hw)
{
	iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) & ~1,
		 &hw->mmio_vaddr[DB_IRQ]);
}

static void ilo_set_reset(struct ilo_hwinfo *hw)
{
	int slot;

	/*
	 * Mapped memory is zeroed on ilo reset, so set a per ccb flag
	 * to indicate that this ccb needs to be closed and reopened.
	 */
	for (slot = 0; slot < max_ccb; slot++) {
		if (!hw->ccb_alloc[slot])
			continue;
		set_channel_reset(&hw->ccb_alloc[slot]->driver_ccb);
	}
}

static ssize_t ilo_read(struct file *fp, char __user *buf,
			size_t len, loff_t *off)
{
	int err, found, cnt, pkt_id, pkt_len;
	struct ccb_data *data = fp->private_data;
	struct ccb *driver_ccb = &data->driver_ccb;
	struct ilo_hwinfo *hw = data->ilo_hw;
	void *pkt;

	if (is_channel_reset(driver_ccb)) {
		/*
		 * If the device has been reset, applications
		 * need to close and reopen all ccbs.
		 */
		return -ENODEV;
	}

	/*
	 * This function is to be called when data is expected
	 * in the channel, and will return an error if no packet is found
	 * during the loop below.  The sleep/retry logic is to allow
	 * applications to call read() immediately post write(),
	 * and give iLO some time to process the sent packet.
	 */
	cnt = 20;
	do {
		/* look for a received packet */
		found = ilo_pkt_dequeue(hw, driver_ccb, RECVQ, &pkt_id,
					&pkt_len, &pkt);
		if (found)
			break;
		cnt--;
		msleep(100);
	} while (!found && cnt);

	if (!found)
		return -EAGAIN;

	/* only copy the length of the received packet */
	if (pkt_len < len)
		len = pkt_len;

	err = copy_to_user(buf, pkt, len);

	/* return the received packet to the queue */
	ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, desc_mem_sz(1));

	return err ? -EFAULT : len;
}

static ssize_t ilo_write(struct file *fp, const char __user *buf,
			 size_t len, loff_t *off)
{
	int err, pkt_id, pkt_len;
	struct ccb_data *data = fp->private_data;
	struct ccb *driver_ccb = &data->driver_ccb;
	struct ilo_hwinfo *hw = data->ilo_hw;
	void *pkt;

	if (is_channel_reset(driver_ccb))
		return -ENODEV;

	/* get a packet to send the user command */
	if (!ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, &pkt_len, &pkt))
		return -EBUSY;

	/* limit the length to the length of the packet */
	if (pkt_len < len)
		len = pkt_len;

	/* on failure, set the len to 0 to return empty packet to the device */
	err = copy_from_user(pkt, buf, len);
	if (err)
		len = 0;

	/* send the packet */
	ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, len);
	doorbell_set(driver_ccb);

	return err ? -EFAULT : len;
}

static __poll_t ilo_poll(struct file *fp, poll_table *wait)
{
	struct ccb_data *data = fp->private_data;
	struct ccb *driver_ccb = &data->driver_ccb;

	poll_wait(fp, &data->ccb_waitq, wait);

	if (is_channel_reset(driver_ccb))
		return EPOLLERR;
	else if (ilo_pkt_recv(data->ilo_hw, driver_ccb))
		return EPOLLIN | EPOLLRDNORM;

	return 0;
}

static int ilo_close(struct inode *ip, struct file *fp)
{
	int slot;
	struct ccb_data *data;
	struct ilo_hwinfo *hw;
	unsigned long flags;

	slot = iminor(ip) % max_ccb;
	hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);

	spin_lock(&hw->open_lock);

	if (hw->ccb_alloc[slot]->ccb_cnt == 1) {

		data = fp->private_data;

		spin_lock_irqsave(&hw->alloc_lock, flags);
		hw->ccb_alloc[slot] = NULL;
		spin_unlock_irqrestore(&hw->alloc_lock, flags);

		ilo_ccb_close(hw->ilo_dev, data);

		kfree(data);
	} else
		hw->ccb_alloc[slot]->ccb_cnt--;

	spin_unlock(&hw->open_lock);

	return 0;
}

static int ilo_open(struct inode *ip, struct file *fp)
{
	int slot, error;
	struct ccb_data *data;
	struct ilo_hwinfo *hw;
	unsigned long flags;

	slot = iminor(ip) % max_ccb;
	hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);

	/* new ccb allocation */
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	spin_lock(&hw->open_lock);

	/* each fd private_data holds sw/hw view of ccb */
	if (hw->ccb_alloc[slot] == NULL) {
		/* create a channel control block for this minor */
		error = ilo_ccb_setup(hw, data, slot);
		if (error) {
			kfree(data);
			goto out;
		}

		data->ccb_cnt = 1;
		data->ccb_excl = fp->f_flags & O_EXCL;
		data->ilo_hw = hw;
		init_waitqueue_head(&data->ccb_waitq);

		/* write the ccb to hw */
		spin_lock_irqsave(&hw->alloc_lock, flags);
		ilo_ccb_open(hw, data, slot);
		hw->ccb_alloc[slot] = data;
		spin_unlock_irqrestore(&hw->alloc_lock, flags);

		/* make sure the channel is functional */
		error = ilo_ccb_verify(hw, data);
		if (error) {

			spin_lock_irqsave(&hw->alloc_lock, flags);
			hw->ccb_alloc[slot] = NULL;
			spin_unlock_irqrestore(&hw->alloc_lock, flags);

			ilo_ccb_close(hw->ilo_dev, data);

			kfree(data);
			goto out;
		}

	} else {
		kfree(data);
		if (fp->f_flags & O_EXCL || hw->ccb_alloc[slot]->ccb_excl) {
			/*
			 * The channel exists, and either this open
			 * or a previous open of this channel wants
			 * exclusive access.
			 */
			error = -EBUSY;
		} else {
			hw->ccb_alloc[slot]->ccb_cnt++;
			error = 0;
		}
	}
out:
	spin_unlock(&hw->open_lock);

	if (!error)
		fp->private_data = hw->ccb_alloc[slot];

	return error;
}

static const struct file_operations ilo_fops = {
	.owner		= THIS_MODULE,
	.read		= ilo_read,
	.write		= ilo_write,
	.poll		= ilo_poll,
	.open		= ilo_open,
	.release	= ilo_close,
	.llseek		= noop_llseek,
};
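
/*
 * Sketch of how a user-space client is expected to drive this interface,
 * based on the file operations above.  The device path is an assumption
 * (udev normally turns the "hpilo!d%dccb%d" name into /dev/hpilo/dNccbM):
 *
 *	int fd = open("/dev/hpilo/d0ccb0", O_RDWR);	// one channel per fd
 *	if (fd < 0)
 *		err(1, "open");
 *
 *	if (write(fd, req, req_len) < 0)		// queue a command packet
 *		err(1, "write");
 *
 *	ssize_t n = read(fd, resp, sizeof(resp));
 *	if (n < 0 && errno == EAGAIN)
 *		;	// iLO has not answered yet: retry or poll() for EPOLLIN
 *	else if (n < 0 && errno == ENODEV)
 *		;	// iLO was reset: close() and reopen the channel
 *
 * O_EXCL on open() requests exclusive use of the channel; a later open of
 * the same channel (or an O_EXCL open of an already shared one) fails with
 * -EBUSY, matching the logic in ilo_open().
 */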

static irqreturn_t ilo_isr(int irq, void *data)
{
	struct ilo_hwinfo *hw = data;
	int pending, i;

	spin_lock(&hw->alloc_lock);

	/* check for ccbs which have data */
	pending = get_device_outbound(hw);
	if (!pending) {
		spin_unlock(&hw->alloc_lock);
		return IRQ_NONE;
	}

	if (is_db_reset(pending)) {
		/* wake up all ccbs if the device was reset */
		pending = -1;
		ilo_set_reset(hw);
	}

	for (i = 0; i < max_ccb; i++) {
		if (!hw->ccb_alloc[i])
			continue;
		if (pending & (1 << i))
			wake_up_interruptible(&hw->ccb_alloc[i]->ccb_waitq);
	}

	/* clear the device of the channels that have been handled */
	clear_pending_db(hw, pending);

	spin_unlock(&hw->alloc_lock);

	return IRQ_HANDLED;
}
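
/*
 * Example of how the outbound doorbell word read in ilo_isr() maps to
 * channels (illustrative values; DB_RESET and DB_OUT come from hpilo.h):
 *
 *	pending == 0x00000009	-> bits 0 and 3 set: wake the waitqueues of
 *				   ccb slots 0 and 3 only
 *	pending & (1 << DB_RESET)
 *				-> global iLO reset: every open ccb is marked
 *				   for reset and all sleepers are woken, since
 *				   pending is forced to -1 (all bits set)
 *
 * Writing the handled bits back via clear_pending_db() acknowledges them
 * to the device before the shared interrupt is reported as handled.
 */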

static void ilo_unmap_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
	pci_iounmap(pdev, hw->db_vaddr);
	pci_iounmap(pdev, hw->ram_vaddr);
	pci_iounmap(pdev, hw->mmio_vaddr);
}

static int ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
	int bar;
	unsigned long off;

	/* map the memory mapped i/o registers */
	hw->mmio_vaddr = pci_iomap(pdev, 1, 0);
	if (hw->mmio_vaddr == NULL) {
		dev_err(&pdev->dev, "Error mapping mmio\n");
		goto out;
	}

	/* map the adapter shared memory region */
	if (pdev->subsystem_device == 0x00E4) {
		bar = 5;
		/* Last 8k is reserved for CCBs */
		off = pci_resource_len(pdev, bar) - 0x2000;
	} else {
		bar = 2;
		off = 0;
	}
	hw->ram_vaddr = pci_iomap_range(pdev, bar, off, max_ccb * ILOHW_CCB_SZ);
	if (hw->ram_vaddr == NULL) {
		dev_err(&pdev->dev, "Error mapping shared mem\n");
		goto mmio_free;
	}

	/* map the doorbell aperture */
	hw->db_vaddr = pci_iomap(pdev, 3, max_ccb * ONE_DB_SIZE);
	if (hw->db_vaddr == NULL) {
		dev_err(&pdev->dev, "Error mapping doorbell\n");
		goto ram_free;
	}

	return 0;
ram_free:
	pci_iounmap(pdev, hw->ram_vaddr);
mmio_free:
	pci_iounmap(pdev, hw->mmio_vaddr);
out:
	return -ENOMEM;
}

static void ilo_remove(struct pci_dev *pdev)
{
	int i, minor;
	struct ilo_hwinfo *ilo_hw = pci_get_drvdata(pdev);

	if (!ilo_hw)
		return;

	clear_device(ilo_hw);

	minor = MINOR(ilo_hw->cdev.dev);
	for (i = minor; i < minor + max_ccb; i++)
		device_destroy(ilo_class, MKDEV(ilo_major, i));

	cdev_del(&ilo_hw->cdev);
	ilo_disable_interrupts(ilo_hw);
	free_irq(pdev->irq, ilo_hw);
	ilo_unmap_device(pdev, ilo_hw);
	pci_release_regions(pdev);
	/*
	 * pci_disable_device(pdev) used to be here. But this PCI device has
	 * two functions with interrupt lines connected to a single pin. The
	 * other one is a USB host controller. So when we disable the PIN here
	 * e.g. by rmmod hpilo, the controller stops working. It is because
	 * the interrupt link is disabled in ACPI since it is not refcounted
	 * yet. See acpi_pci_link_free_irq called from acpi_pci_irq_disable.
	 */
	kfree(ilo_hw);
	ilo_hwdev[(minor / max_ccb)] = 0;
}

static int ilo_probe(struct pci_dev *pdev,
		     const struct pci_device_id *ent)
{
	int devnum, minor, start, error = 0;
	struct ilo_hwinfo *ilo_hw;

	if (pci_match_id(ilo_blacklist, pdev)) {
		dev_dbg(&pdev->dev, "Not supported on this device\n");
		return -ENODEV;
	}

	if (max_ccb > MAX_CCB)
		max_ccb = MAX_CCB;
	else if (max_ccb < MIN_CCB)
		max_ccb = MIN_CCB;

	/* find a free range for device files */
	for (devnum = 0; devnum < MAX_ILO_DEV; devnum++) {
		if (ilo_hwdev[devnum] == 0) {
			ilo_hwdev[devnum] = 1;
			break;
		}
	}

	if (devnum == MAX_ILO_DEV) {
		dev_err(&pdev->dev, "Error finding free device\n");
		return -ENODEV;
	}

	/* track global allocations for this device */
	error = -ENOMEM;
	ilo_hw = kzalloc(sizeof(*ilo_hw), GFP_KERNEL);
	if (!ilo_hw)
		goto out;

	ilo_hw->ilo_dev = pdev;
	spin_lock_init(&ilo_hw->alloc_lock);
	spin_lock_init(&ilo_hw->fifo_lock);
	spin_lock_init(&ilo_hw->open_lock);

	error = pci_enable_device(pdev);
	if (error)
		goto free;

	pci_set_master(pdev);

	error = pci_request_regions(pdev, ILO_NAME);
	if (error)
		goto disable;

	error = ilo_map_device(pdev, ilo_hw);
	if (error)
		goto free_regions;

	pci_set_drvdata(pdev, ilo_hw);
	clear_device(ilo_hw);

	error = request_irq(pdev->irq, ilo_isr, IRQF_SHARED, "hpilo", ilo_hw);
	if (error)
		goto unmap;

	ilo_enable_interrupts(ilo_hw);

	cdev_init(&ilo_hw->cdev, &ilo_fops);
	ilo_hw->cdev.owner = THIS_MODULE;
	start = devnum * max_ccb;
	error = cdev_add(&ilo_hw->cdev, MKDEV(ilo_major, start), max_ccb);
	if (error) {
		dev_err(&pdev->dev, "Could not add cdev\n");
		goto remove_isr;
	}

	for (minor = 0 ; minor < max_ccb; minor++) {
		struct device *dev;
		dev = device_create(ilo_class, &pdev->dev,
				    MKDEV(ilo_major, minor), NULL,
				    "hpilo!d%dccb%d", devnum, minor);
		if (IS_ERR(dev))
			dev_err(&pdev->dev, "Could not create files\n");
	}

	return 0;
remove_isr:
	ilo_disable_interrupts(ilo_hw);
	free_irq(pdev->irq, ilo_hw);
unmap:
	ilo_unmap_device(pdev, ilo_hw);
free_regions:
	pci_release_regions(pdev);
disable:
/*	pci_disable_device(pdev);  see comment in ilo_remove */
free:
	kfree(ilo_hw);
out:
	ilo_hwdev[devnum] = 0;
	return error;
}

static const struct pci_device_id ilo_devices[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB204) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3307) },
	{ }
};
MODULE_DEVICE_TABLE(pci, ilo_devices);

static struct pci_driver ilo_driver = {
	.name     = ILO_NAME,
	.id_table = ilo_devices,
	.probe    = ilo_probe,
	.remove   = ilo_remove,
};

static int __init ilo_init(void)
{
	int error;
	dev_t dev;

	ilo_class = class_create(THIS_MODULE, "iLO");
	if (IS_ERR(ilo_class)) {
		error = PTR_ERR(ilo_class);
		goto out;
	}

	error = alloc_chrdev_region(&dev, 0, MAX_OPEN, ILO_NAME);
	if (error)
		goto class_destroy;

	ilo_major = MAJOR(dev);

	error = pci_register_driver(&ilo_driver);
	if (error)
		goto chr_remove;

	return 0;
chr_remove:
	unregister_chrdev_region(dev, MAX_OPEN);
class_destroy:
	class_destroy(ilo_class);
out:
	return error;
}

static void __exit ilo_exit(void)
{
	pci_unregister_driver(&ilo_driver);
	unregister_chrdev_region(MKDEV(ilo_major, 0), MAX_OPEN);
	class_destroy(ilo_class);
}

MODULE_VERSION("1.5.0");
MODULE_ALIAS(ILO_NAME);
MODULE_DESCRIPTION(ILO_NAME);
MODULE_AUTHOR("David Altobelli <david.altobelli@hpe.com>");
MODULE_LICENSE("GPL v2");

module_param(max_ccb, uint, 0444);
MODULE_PARM_DESC(max_ccb, "Maximum number of HP iLO channels to attach (8-24)(default=16)");
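
/*
 * Example (an illustration, not additional documentation): loading the
 * module with a larger channel count
 *
 *	# modprobe hpilo max_ccb=24
 *
 * ilo_probe() clamps the value to the [MIN_CCB, MAX_CCB] range from
 * hpilo.h before any per-device resources are sized with it.
 */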

module_init(ilo_init);
module_exit(ilo_exit);