linux/drivers/misc/hpilo.c
/*
 * Driver for the HP iLO management processor.
 *
 * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
 *      David Altobelli <david.altobelli@hp.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include "hpilo.h"

static struct class *ilo_class;
static unsigned int ilo_major;
static char ilo_hwdev[MAX_ILO_DEV];

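/*
 * A fifo entry packs a descriptor id and a length in quadwords.
 * get_entry_id()/get_entry_len() unpack an entry, and mk_entry()
 * builds one, rounding the byte length up to whole quadwords.
 */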
static inline int get_entry_id(int entry)
{
        return (entry & ENTRY_MASK_DESCRIPTOR) >> ENTRY_BITPOS_DESCRIPTOR;
}

static inline int get_entry_len(int entry)
{
        return ((entry & ENTRY_MASK_QWORDS) >> ENTRY_BITPOS_QWORDS) << 3;
}

static inline int mk_entry(int id, int len)
{
        int qlen = len & 7 ? (len >> 3) + 1 : len >> 3;
        return id << ENTRY_BITPOS_DESCRIPTOR | qlen << ENTRY_BITPOS_QWORDS;
}

static inline int desc_mem_sz(int nr_entry)
{
        return nr_entry << L2_QENTRY_SZ;
}

/*
 * FIFO queues, shared with hardware.
 *
 * If a queue has empty slots, an entry is added to the queue tail,
 * and that entry is marked as occupied.
 * Entries can be dequeued from the head of the list, when the device
 * has marked the entry as consumed.
 *
 * Returns true on successful queue/dequeue, false on failure.
 */
static int fifo_enqueue(struct ilo_hwinfo *hw, char *fifobar, int entry)
{
        struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&hw->fifo_lock, flags);
        if (!(fifo_q->fifobar[(fifo_q->tail + 1) & fifo_q->imask]
              & ENTRY_MASK_O)) {
                fifo_q->fifobar[fifo_q->tail & fifo_q->imask] |=
                                (entry & ENTRY_MASK_NOSTATE) | fifo_q->merge;
                fifo_q->tail += 1;
                ret = 1;
        }
        spin_unlock_irqrestore(&hw->fifo_lock, flags);

        return ret;
}

static int fifo_dequeue(struct ilo_hwinfo *hw, char *fifobar, int *entry)
{
        struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
        unsigned long flags;
        int ret = 0;
        u64 c;

        spin_lock_irqsave(&hw->fifo_lock, flags);
        c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
        if (c & ENTRY_MASK_C) {
                if (entry)
                        *entry = c & ENTRY_MASK_NOSTATE;

                fifo_q->fifobar[fifo_q->head & fifo_q->imask] =
                                                        (c | ENTRY_MASK) + 1;
                fifo_q->head += 1;
                ret = 1;
        }
        spin_unlock_irqrestore(&hw->fifo_lock, flags);

        return ret;
}

static int fifo_check_recv(struct ilo_hwinfo *hw, char *fifobar)
{
        struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
        unsigned long flags;
        int ret = 0;
        u64 c;

        spin_lock_irqsave(&hw->fifo_lock, flags);
        c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
        if (c & ENTRY_MASK_C)
                ret = 1;
        spin_unlock_irqrestore(&hw->fifo_lock, flags);

        return ret;
}

static int ilo_pkt_enqueue(struct ilo_hwinfo *hw, struct ccb *ccb,
                           int dir, int id, int len)
{
        char *fifobar;
        int entry;

        if (dir == SENDQ)
                fifobar = ccb->ccb_u1.send_fifobar;
        else
                fifobar = ccb->ccb_u3.recv_fifobar;

        entry = mk_entry(id, len);
        return fifo_enqueue(hw, fifobar, entry);
}

static int ilo_pkt_dequeue(struct ilo_hwinfo *hw, struct ccb *ccb,
                           int dir, int *id, int *len, void **pkt)
{
        char *fifobar, *desc;
        int entry = 0, pkt_id = 0;
        int ret;

        if (dir == SENDQ) {
                fifobar = ccb->ccb_u1.send_fifobar;
                desc = ccb->ccb_u2.send_desc;
        } else {
                fifobar = ccb->ccb_u3.recv_fifobar;
                desc = ccb->ccb_u4.recv_desc;
        }

        ret = fifo_dequeue(hw, fifobar, &entry);
        if (ret) {
                pkt_id = get_entry_id(entry);
                if (id)
                        *id = pkt_id;
                if (len)
                        *len = get_entry_len(entry);
                if (pkt)
                        *pkt = (void *)(desc + desc_mem_sz(pkt_id));
        }

        return ret;
}

static int ilo_pkt_recv(struct ilo_hwinfo *hw, struct ccb *ccb)
{
        char *fifobar = ccb->ccb_u3.recv_fifobar;

        return fifo_check_recv(hw, fifobar);
}

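/*
 * Per-channel doorbell: writing 1 rings the doorbell to tell iLO the
 * channel has work pending, writing 2 clears it again.
 */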
static inline void doorbell_set(struct ccb *ccb)
{
        iowrite8(1, ccb->ccb_u5.db_base);
}

static inline void doorbell_clr(struct ccb *ccb)
{
        iowrite8(2, ccb->ccb_u5.db_base);
}

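/*
 * Build a queue control word from the log2 descriptor size, the fifo
 * index mask and the descriptor limit, with the A (active) bit clear
 * and the G (go) bit set.
 */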
static inline int ctrl_set(int l2sz, int idxmask, int desclim)
{
        int active = 0, go = 1;
        return l2sz << CTRL_BITPOS_L2SZ |
               idxmask << CTRL_BITPOS_FIFOINDEXMASK |
               desclim << CTRL_BITPOS_DESCLIMIT |
               active << CTRL_BITPOS_A |
               go << CTRL_BITPOS_G;
}

static void ctrl_setup(struct ccb *ccb, int nr_desc, int l2desc_sz)
{
        /* for simplicity, use the same parameters for send and recv ctrls */
        ccb->send_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1);
        ccb->recv_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1);
}

static inline int fifo_sz(int nr_entry)
{
        /* size of a fifo is determined by the number of entries it contains */
        return (nr_entry * sizeof(u64)) + FIFOHANDLESIZE;
}

static void fifo_setup(void *base_addr, int nr_entry)
{
        struct fifo *fifo_q = base_addr;
        int i;

        /* set up an empty fifo */
        fifo_q->head = 0;
        fifo_q->tail = 0;
        fifo_q->reset = 0;
        fifo_q->nrents = nr_entry;
        fifo_q->imask = nr_entry - 1;
        fifo_q->merge = ENTRY_MASK_O;

        for (i = 0; i < nr_entry; i++)
                fifo_q->fifobar[i] = 0;
}

static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data)
{
        struct ccb *driver_ccb = &data->driver_ccb;
        struct ccb __iomem *device_ccb = data->mapped_ccb;
        int retries;

        /* complicated dance to tell the hw we are stopping */
        doorbell_clr(driver_ccb);
        iowrite32(ioread32(&device_ccb->send_ctrl) & ~(1 << CTRL_BITPOS_G),
                  &device_ccb->send_ctrl);
        iowrite32(ioread32(&device_ccb->recv_ctrl) & ~(1 << CTRL_BITPOS_G),
                  &device_ccb->recv_ctrl);

        /* give iLO some time to process stop request */
        for (retries = MAX_WAIT; retries > 0; retries--) {
                doorbell_set(driver_ccb);
                udelay(WAIT_TIME);
                if (!(ioread32(&device_ccb->send_ctrl) & (1 << CTRL_BITPOS_A))
                    &&
                    !(ioread32(&device_ccb->recv_ctrl) & (1 << CTRL_BITPOS_A)))
                        break;
        }
        if (retries == 0)
                dev_err(&pdev->dev, "Closing, but controller still active\n");

        /* clear the hw ccb */
        memset_io(device_ccb, 0, sizeof(struct ccb));

        /* free resources used to back send/recv queues */
        pci_free_consistent(pdev, data->dma_size, data->dma_va, data->dma_pa);
}

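/*
 * Carve the shared DMA region into a send fifo, a recv fifo and the
 * matching descriptor areas, recording virtual addresses in the
 * driver ccb and bus addresses in the ccb destined for the device.
 */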
static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
{
        char *dma_va;
        dma_addr_t dma_pa;
        struct ccb *driver_ccb, *ilo_ccb;

        driver_ccb = &data->driver_ccb;
        ilo_ccb = &data->ilo_ccb;

        data->dma_size = 2 * fifo_sz(NR_QENTRY) +
                         2 * desc_mem_sz(NR_QENTRY) +
                         ILO_START_ALIGN + ILO_CACHE_SZ;

        data->dma_va = pci_alloc_consistent(hw->ilo_dev, data->dma_size,
                                            &data->dma_pa);
        if (!data->dma_va)
                return -ENOMEM;

        dma_va = (char *)data->dma_va;
        dma_pa = data->dma_pa;

        memset(dma_va, 0, data->dma_size);

        dma_va = (char *)roundup((unsigned long)dma_va, ILO_START_ALIGN);
        dma_pa = roundup(dma_pa, ILO_START_ALIGN);

        /*
         * Create two ccb's, one with virt addrs, one with phys addrs.
         * Copy the phys addr ccb to device shared mem.
         */
        ctrl_setup(driver_ccb, NR_QENTRY, L2_QENTRY_SZ);
        ctrl_setup(ilo_ccb, NR_QENTRY, L2_QENTRY_SZ);

        fifo_setup(dma_va, NR_QENTRY);
        driver_ccb->ccb_u1.send_fifobar = dma_va + FIFOHANDLESIZE;
        ilo_ccb->ccb_u1.send_fifobar_pa = dma_pa + FIFOHANDLESIZE;
        dma_va += fifo_sz(NR_QENTRY);
        dma_pa += fifo_sz(NR_QENTRY);

        dma_va = (char *)roundup((unsigned long)dma_va, ILO_CACHE_SZ);
        dma_pa = roundup(dma_pa, ILO_CACHE_SZ);

        fifo_setup(dma_va, NR_QENTRY);
        driver_ccb->ccb_u3.recv_fifobar = dma_va + FIFOHANDLESIZE;
        ilo_ccb->ccb_u3.recv_fifobar_pa = dma_pa + FIFOHANDLESIZE;
        dma_va += fifo_sz(NR_QENTRY);
        dma_pa += fifo_sz(NR_QENTRY);

        driver_ccb->ccb_u2.send_desc = dma_va;
        ilo_ccb->ccb_u2.send_desc_pa = dma_pa;
        dma_pa += desc_mem_sz(NR_QENTRY);
        dma_va += desc_mem_sz(NR_QENTRY);

        driver_ccb->ccb_u4.recv_desc = dma_va;
        ilo_ccb->ccb_u4.recv_desc_pa = dma_pa;

        driver_ccb->channel = slot;
        ilo_ccb->channel = slot;

        driver_ccb->ccb_u5.db_base = hw->db_vaddr + (slot << L2_DB_SIZE);
        ilo_ccb->ccb_u5.db_base = NULL; /* hw ccb's doorbell is not used */

        return 0;
}

static void ilo_ccb_open(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
{
        int pkt_id, pkt_sz;
        struct ccb *driver_ccb = &data->driver_ccb;

        /* copy the ccb with physical addrs to device memory */
        data->mapped_ccb = (struct ccb __iomem *)
                                (hw->ram_vaddr + (slot * ILOHW_CCB_SZ));
        memcpy_toio(data->mapped_ccb, &data->ilo_ccb, sizeof(struct ccb));

        /* put packets on the send and receive queues */
        pkt_sz = 0;
        for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++) {
                ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, pkt_sz);
                doorbell_set(driver_ccb);
        }

        pkt_sz = desc_mem_sz(1);
        for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++)
                ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, pkt_sz);

        /* the ccb is ready to use */
        doorbell_clr(driver_ccb);
}

static int ilo_ccb_verify(struct ilo_hwinfo *hw, struct ccb_data *data)
{
        int pkt_id, i;
        struct ccb *driver_ccb = &data->driver_ccb;

        /* make sure iLO is really handling requests */
        for (i = MAX_WAIT; i > 0; i--) {
                if (ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, NULL, NULL))
                        break;
                udelay(WAIT_TIME);
        }

        if (i == 0) {
                dev_err(&hw->ilo_dev->dev, "Open could not dequeue a packet\n");
                return -EBUSY;
        }

        ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, 0);
        doorbell_set(driver_ccb);
        return 0;
}

static inline int is_channel_reset(struct ccb *ccb)
{
        /* check for this particular channel needing a reset */
        return FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset;
}

static inline void set_channel_reset(struct ccb *ccb)
{
        /* set a flag indicating this channel needs a reset */
        FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset = 1;
}

static inline int get_device_outbound(struct ilo_hwinfo *hw)
{
        return ioread32(&hw->mmio_vaddr[DB_OUT]);
}

static inline int is_db_reset(int db_out)
{
        return db_out & (1 << DB_RESET);
}

static inline int is_device_reset(struct ilo_hwinfo *hw)
{
        /* check for global reset condition */
        return is_db_reset(get_device_outbound(hw));
}

static inline void clear_pending_db(struct ilo_hwinfo *hw, int clr)
{
        iowrite32(clr, &hw->mmio_vaddr[DB_OUT]);
}

static inline void clear_device(struct ilo_hwinfo *hw)
{
        /* clear the device (reset bits, pending channel entries) */
        clear_pending_db(hw, -1);
}

static inline void ilo_enable_interrupts(struct ilo_hwinfo *hw)
{
        iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) | 1, &hw->mmio_vaddr[DB_IRQ]);
}

static inline void ilo_disable_interrupts(struct ilo_hwinfo *hw)
{
        iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) & ~1,
                 &hw->mmio_vaddr[DB_IRQ]);
}

static void ilo_set_reset(struct ilo_hwinfo *hw)
{
        int slot;

        /*
         * Mapped memory is zeroed on ilo reset, so set a per ccb flag
         * to indicate that this ccb needs to be closed and reopened.
         */
        for (slot = 0; slot < MAX_CCB; slot++) {
                if (!hw->ccb_alloc[slot])
                        continue;
                set_channel_reset(&hw->ccb_alloc[slot]->driver_ccb);
        }
}

static ssize_t ilo_read(struct file *fp, char __user *buf,
                        size_t len, loff_t *off)
{
        int err, found, cnt, pkt_id, pkt_len;
        struct ccb_data *data = fp->private_data;
        struct ccb *driver_ccb = &data->driver_ccb;
        struct ilo_hwinfo *hw = data->ilo_hw;
        void *pkt;

        if (is_channel_reset(driver_ccb)) {
                /*
                 * If the device has been reset, applications
                 * need to close and reopen all ccbs.
                 */
                return -ENODEV;
        }

        /*
         * This function is to be called when data is expected
         * in the channel, and will return an error if no packet is found
         * during the loop below.  The sleep/retry logic is to allow
         * applications to call read() immediately post write(),
         * and give iLO some time to process the sent packet.
         */
        cnt = 20;
        do {
                /* look for a received packet */
                found = ilo_pkt_dequeue(hw, driver_ccb, RECVQ, &pkt_id,
                                        &pkt_len, &pkt);
                if (found)
                        break;
                cnt--;
                msleep(100);
        } while (!found && cnt);

        if (!found)
                return -EAGAIN;

        /* only copy the length of the received packet */
        if (pkt_len < len)
                len = pkt_len;

        err = copy_to_user(buf, pkt, len);

        /* return the received packet to the queue */
        ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, desc_mem_sz(1));

        return err ? -EFAULT : len;
}

static ssize_t ilo_write(struct file *fp, const char __user *buf,
                         size_t len, loff_t *off)
{
        int err, pkt_id, pkt_len;
        struct ccb_data *data = fp->private_data;
        struct ccb *driver_ccb = &data->driver_ccb;
        struct ilo_hwinfo *hw = data->ilo_hw;
        void *pkt;

        if (is_channel_reset(driver_ccb))
                return -ENODEV;

        /* get a packet to send the user command */
        if (!ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, &pkt_len, &pkt))
                return -EBUSY;

        /* limit the length to the length of the packet */
        if (pkt_len < len)
                len = pkt_len;

        /* on failure, set the len to 0 to return empty packet to the device */
        err = copy_from_user(pkt, buf, len);
        if (err)
                len = 0;

        /* send the packet */
        ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, len);
        doorbell_set(driver_ccb);

        return err ? -EFAULT : len;
}

static unsigned int ilo_poll(struct file *fp, poll_table *wait)
{
        struct ccb_data *data = fp->private_data;
        struct ccb *driver_ccb = &data->driver_ccb;

        poll_wait(fp, &data->ccb_waitq, wait);

        if (is_channel_reset(driver_ccb))
                return POLLERR;
        else if (ilo_pkt_recv(data->ilo_hw, driver_ccb))
                return POLLIN | POLLRDNORM;

        return 0;
}

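/*
 * Each ccb slot is shared by all opens of the same minor: the first
 * open creates and verifies the ccb, later opens bump ccb_cnt, and
 * the final close tears the ccb down and frees its DMA region.
 */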
static int ilo_close(struct inode *ip, struct file *fp)
{
        int slot;
        struct ccb_data *data;
        struct ilo_hwinfo *hw;
        unsigned long flags;

        slot = iminor(ip) % MAX_CCB;
        hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);

        spin_lock(&hw->open_lock);

        if (hw->ccb_alloc[slot]->ccb_cnt == 1) {

                data = fp->private_data;

                spin_lock_irqsave(&hw->alloc_lock, flags);
                hw->ccb_alloc[slot] = NULL;
                spin_unlock_irqrestore(&hw->alloc_lock, flags);

                ilo_ccb_close(hw->ilo_dev, data);

                kfree(data);
        } else
                hw->ccb_alloc[slot]->ccb_cnt--;

        spin_unlock(&hw->open_lock);

        return 0;
}

static int ilo_open(struct inode *ip, struct file *fp)
{
        int slot, error;
        struct ccb_data *data;
        struct ilo_hwinfo *hw;
        unsigned long flags;

        slot = iminor(ip) % MAX_CCB;
        hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);

        /* new ccb allocation */
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        spin_lock(&hw->open_lock);

        /* each fd private_data holds sw/hw view of ccb */
        if (hw->ccb_alloc[slot] == NULL) {
                /* create a channel control block for this minor */
                error = ilo_ccb_setup(hw, data, slot);
                if (error) {
                        kfree(data);
                        goto out;
                }

                data->ccb_cnt = 1;
                data->ccb_excl = fp->f_flags & O_EXCL;
                data->ilo_hw = hw;
                init_waitqueue_head(&data->ccb_waitq);

                /* write the ccb to hw */
                spin_lock_irqsave(&hw->alloc_lock, flags);
                ilo_ccb_open(hw, data, slot);
                hw->ccb_alloc[slot] = data;
                spin_unlock_irqrestore(&hw->alloc_lock, flags);

                /* make sure the channel is functional */
                error = ilo_ccb_verify(hw, data);
                if (error) {

                        spin_lock_irqsave(&hw->alloc_lock, flags);
                        hw->ccb_alloc[slot] = NULL;
                        spin_unlock_irqrestore(&hw->alloc_lock, flags);

                        ilo_ccb_close(hw->ilo_dev, data);

                        kfree(data);
                        goto out;
                }

        } else {
                kfree(data);
                if (fp->f_flags & O_EXCL || hw->ccb_alloc[slot]->ccb_excl) {
                        /*
                         * The channel exists, and either this open
                         * or a previous open of this channel wants
                         * exclusive access.
                         */
                        error = -EBUSY;
                } else {
                        hw->ccb_alloc[slot]->ccb_cnt++;
                        error = 0;
                }
        }
out:
        spin_unlock(&hw->open_lock);

        if (!error)
                fp->private_data = hw->ccb_alloc[slot];

        return error;
}

static const struct file_operations ilo_fops = {
        .owner          = THIS_MODULE,
        .read           = ilo_read,
        .write          = ilo_write,
        .poll           = ilo_poll,
        .open           = ilo_open,
        .release        = ilo_close,
        .llseek         = noop_llseek,
};

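/*
 * Shared interrupt handler: read the outbound doorbell register, mark
 * every open channel for reset if the device reset bit is set, wake
 * any waiters on channels with pending bits, then acknowledge the
 * bits that were handled.
 */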
static irqreturn_t ilo_isr(int irq, void *data)
{
        struct ilo_hwinfo *hw = data;
        int pending, i;

        spin_lock(&hw->alloc_lock);

        /* check for ccbs which have data */
        pending = get_device_outbound(hw);
        if (!pending) {
                spin_unlock(&hw->alloc_lock);
                return IRQ_NONE;
        }

        if (is_db_reset(pending)) {
                /* wake up all ccbs if the device was reset */
                pending = -1;
                ilo_set_reset(hw);
        }

        for (i = 0; i < MAX_CCB; i++) {
                if (!hw->ccb_alloc[i])
                        continue;
                if (pending & (1 << i))
                        wake_up_interruptible(&hw->ccb_alloc[i]->ccb_waitq);
        }

        /* clear the device of the channels that have been handled */
        clear_pending_db(hw, pending);

        spin_unlock(&hw->alloc_lock);

        return IRQ_HANDLED;
}

static void ilo_unmap_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
        pci_iounmap(pdev, hw->db_vaddr);
        pci_iounmap(pdev, hw->ram_vaddr);
        pci_iounmap(pdev, hw->mmio_vaddr);
}

static int __devinit ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
        int error = -ENOMEM;

        /* map the memory mapped i/o registers */
        hw->mmio_vaddr = pci_iomap(pdev, 1, 0);
        if (hw->mmio_vaddr == NULL) {
                dev_err(&pdev->dev, "Error mapping mmio\n");
                goto out;
        }

        /* map the adapter shared memory region */
        hw->ram_vaddr = pci_iomap(pdev, 2, MAX_CCB * ILOHW_CCB_SZ);
        if (hw->ram_vaddr == NULL) {
                dev_err(&pdev->dev, "Error mapping shared mem\n");
                goto mmio_free;
        }

        /* map the doorbell aperture */
        hw->db_vaddr = pci_iomap(pdev, 3, MAX_CCB * ONE_DB_SIZE);
        if (hw->db_vaddr == NULL) {
                dev_err(&pdev->dev, "Error mapping doorbell\n");
                goto ram_free;
        }

        return 0;
ram_free:
        pci_iounmap(pdev, hw->ram_vaddr);
mmio_free:
        pci_iounmap(pdev, hw->mmio_vaddr);
out:
        return error;
}

static void ilo_remove(struct pci_dev *pdev)
{
        int i, minor;
        struct ilo_hwinfo *ilo_hw = pci_get_drvdata(pdev);

        clear_device(ilo_hw);

        minor = MINOR(ilo_hw->cdev.dev);
        for (i = minor; i < minor + MAX_CCB; i++)
                device_destroy(ilo_class, MKDEV(ilo_major, i));

        cdev_del(&ilo_hw->cdev);
        ilo_disable_interrupts(ilo_hw);
        free_irq(pdev->irq, ilo_hw);
        ilo_unmap_device(pdev, ilo_hw);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        kfree(ilo_hw);
        ilo_hwdev[(minor / MAX_CCB)] = 0;
}

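/*
 * Probe: claim a free hardware slot, map the BARs, hook the shared
 * interrupt and create one character device node per ccb
 * ("hpilo!d%dccb%d"), unwinding everything on failure.
 */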
static int __devinit ilo_probe(struct pci_dev *pdev,
                               const struct pci_device_id *ent)
{
        int devnum, minor, start, error;
        struct ilo_hwinfo *ilo_hw;

        /* find a free range for device files */
        for (devnum = 0; devnum < MAX_ILO_DEV; devnum++) {
                if (ilo_hwdev[devnum] == 0) {
                        ilo_hwdev[devnum] = 1;
                        break;
                }
        }

        if (devnum == MAX_ILO_DEV) {
                dev_err(&pdev->dev, "Error finding free device\n");
                return -ENODEV;
        }

        /* track global allocations for this device */
        error = -ENOMEM;
        ilo_hw = kzalloc(sizeof(*ilo_hw), GFP_KERNEL);
        if (!ilo_hw)
                goto out;

        ilo_hw->ilo_dev = pdev;
        spin_lock_init(&ilo_hw->alloc_lock);
        spin_lock_init(&ilo_hw->fifo_lock);
        spin_lock_init(&ilo_hw->open_lock);

        error = pci_enable_device(pdev);
        if (error)
                goto free;

        pci_set_master(pdev);

        error = pci_request_regions(pdev, ILO_NAME);
        if (error)
                goto disable;

        error = ilo_map_device(pdev, ilo_hw);
        if (error)
                goto free_regions;

        pci_set_drvdata(pdev, ilo_hw);
        clear_device(ilo_hw);

        error = request_irq(pdev->irq, ilo_isr, IRQF_SHARED, "hpilo", ilo_hw);
        if (error)
                goto unmap;

        ilo_enable_interrupts(ilo_hw);

        cdev_init(&ilo_hw->cdev, &ilo_fops);
        ilo_hw->cdev.owner = THIS_MODULE;
        start = devnum * MAX_CCB;
        error = cdev_add(&ilo_hw->cdev, MKDEV(ilo_major, start), MAX_CCB);
        if (error) {
                dev_err(&pdev->dev, "Could not add cdev\n");
                goto remove_isr;
        }

        for (minor = 0 ; minor < MAX_CCB; minor++) {
                struct device *dev;
                dev = device_create(ilo_class, &pdev->dev,
                                    MKDEV(ilo_major, minor), NULL,
                                    "hpilo!d%dccb%d", devnum, minor);
                if (IS_ERR(dev))
                        dev_err(&pdev->dev, "Could not create files\n");
        }

        return 0;
remove_isr:
        ilo_disable_interrupts(ilo_hw);
        free_irq(pdev->irq, ilo_hw);
unmap:
        ilo_unmap_device(pdev, ilo_hw);
free_regions:
        pci_release_regions(pdev);
disable:
        pci_disable_device(pdev);
free:
        kfree(ilo_hw);
out:
        ilo_hwdev[devnum] = 0;
        return error;
}

static struct pci_device_id ilo_devices[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB204) },
        { PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3307) },
        { }
};
MODULE_DEVICE_TABLE(pci, ilo_devices);

static struct pci_driver ilo_driver = {
        .name     = ILO_NAME,
        .id_table = ilo_devices,
        .probe    = ilo_probe,
        .remove   = __devexit_p(ilo_remove),
};

static int __init ilo_init(void)
{
        int error;
        dev_t dev;

        ilo_class = class_create(THIS_MODULE, "iLO");
        if (IS_ERR(ilo_class)) {
                error = PTR_ERR(ilo_class);
                goto out;
        }

        error = alloc_chrdev_region(&dev, 0, MAX_OPEN, ILO_NAME);
        if (error)
                goto class_destroy;

        ilo_major = MAJOR(dev);

        error = pci_register_driver(&ilo_driver);
        if (error)
                goto chr_remove;

        return 0;
chr_remove:
        unregister_chrdev_region(dev, MAX_OPEN);
class_destroy:
        class_destroy(ilo_class);
out:
        return error;
}

static void __exit ilo_exit(void)
{
        pci_unregister_driver(&ilo_driver);
        unregister_chrdev_region(MKDEV(ilo_major, 0), MAX_OPEN);
        class_destroy(ilo_class);
}

MODULE_VERSION("1.2");
MODULE_ALIAS(ILO_NAME);
MODULE_DESCRIPTION(ILO_NAME);
MODULE_AUTHOR("David Altobelli <david.altobelli@hp.com>");
MODULE_LICENSE("GPL v2");

module_init(ilo_init);
module_exit(ilo_exit);