linux/drivers/infiniband/hw/qib/qib_file_ops.c
   1/*
   2 * Copyright (c) 2012 Intel Corporation. All rights reserved.
   3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
   4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
   5 *
   6 * This software is available to you under a choice of one of two
   7 * licenses.  You may choose to be licensed under the terms of the GNU
   8 * General Public License (GPL) Version 2, available from the file
   9 * COPYING in the main directory of this source tree, or the
  10 * OpenIB.org BSD license below:
  11 *
  12 *     Redistribution and use in source and binary forms, with or
  13 *     without modification, are permitted provided that the following
  14 *     conditions are met:
  15 *
  16 *      - Redistributions of source code must retain the above
  17 *        copyright notice, this list of conditions and the following
  18 *        disclaimer.
  19 *
  20 *      - Redistributions in binary form must reproduce the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer in the documentation and/or other materials
  23 *        provided with the distribution.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32 * SOFTWARE.
  33 */
  34
  35#include <linux/pci.h>
  36#include <linux/poll.h>
  37#include <linux/cdev.h>
  38#include <linux/swap.h>
  39#include <linux/vmalloc.h>
  40#include <linux/highmem.h>
  41#include <linux/io.h>
  42#include <linux/uio.h>
  43#include <linux/jiffies.h>
  44#include <asm/pgtable.h>
  45#include <linux/delay.h>
  46#include <linux/export.h>
  47
  48#include "qib.h"
  49#include "qib_common.h"
  50#include "qib_user_sdma.h"
  51
  52#undef pr_fmt
  53#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
  54
  55static int qib_open(struct inode *, struct file *);
  56static int qib_close(struct inode *, struct file *);
  57static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
  58static ssize_t qib_aio_write(struct kiocb *, const struct iovec *,
  59                             unsigned long, loff_t);
  60static unsigned int qib_poll(struct file *, struct poll_table_struct *);
  61static int qib_mmapf(struct file *, struct vm_area_struct *);
  62
  63static const struct file_operations qib_file_ops = {
  64        .owner = THIS_MODULE,
  65        .write = qib_write,
  66        .aio_write = qib_aio_write,
  67        .open = qib_open,
  68        .release = qib_close,
  69        .poll = qib_poll,
  70        .mmap = qib_mmapf,
  71        .llseek = noop_llseek,
  72};
  73
  74/*
  75 * Convert kernel virtual addresses to physical addresses so they don't
  76 * potentially conflict with the chip addresses used as mmap offsets.
  77 * It doesn't really matter what mmap offset we use as long as we can
  78 * interpret it correctly.
  79 */
  80static u64 cvt_kvaddr(void *p)
  81{
  82        struct page *page;
  83        u64 paddr = 0;
  84
  85        page = vmalloc_to_page(p);
  86        if (page)
  87                paddr = page_to_pfn(page) << PAGE_SHIFT;
  88
  89        return paddr;
  90}
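/*
 * Illustrative note (not part of the driver logic): the value produced by
 * cvt_kvaddr() is only an mmap offset cookie.  Userspace hands it back via
 * mmap() on the device fd, and mmap_kvaddr() below recomputes the same
 * conversion on the kernel buffer and compares, e.g.:
 *
 *	pgaddr = vma->vm_pgoff << PAGE_SHIFT;
 *	if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase))
 *		addr = rcd->subctxt_uregbase;
 *
 * so the offset never needs to be dereferenceable in either address space.
 */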
  91
  92static int qib_get_base_info(struct file *fp, void __user *ubase,
  93                             size_t ubase_size)
  94{
  95        struct qib_ctxtdata *rcd = ctxt_fp(fp);
  96        int ret = 0;
  97        struct qib_base_info *kinfo = NULL;
  98        struct qib_devdata *dd = rcd->dd;
  99        struct qib_pportdata *ppd = rcd->ppd;
 100        unsigned subctxt_cnt;
 101        int shared, master;
 102        size_t sz;
 103
 104        subctxt_cnt = rcd->subctxt_cnt;
 105        if (!subctxt_cnt) {
 106                shared = 0;
 107                master = 0;
 108                subctxt_cnt = 1;
 109        } else {
 110                shared = 1;
 111                master = !subctxt_fp(fp);
 112        }
 113
 114        sz = sizeof(*kinfo);
 115        /* If context sharing is not requested, allow the old size structure */
 116        if (!shared)
 117                sz -= 7 * sizeof(u64);
 118        if (ubase_size < sz) {
 119                ret = -EINVAL;
 120                goto bail;
 121        }
 122
 123        kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
 124        if (kinfo == NULL) {
 125                ret = -ENOMEM;
 126                goto bail;
 127        }
 128
 129        ret = dd->f_get_base_info(rcd, kinfo);
 130        if (ret < 0)
 131                goto bail;
 132
 133        kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
 134        kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
 135        kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
 136        kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;
 137        /*
 138         * have to mmap whole thing
 139         */
 140        kinfo->spi_rcv_egrbuftotlen =
 141                rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
 142        kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
 143        kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
 144                rcd->rcvegrbuf_chunks;
 145        kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
 146        if (master)
 147                kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
 148        /*
 149         * for this use, may be cfgctxts summed over all chips that
  150         * are configured and present
 151         */
 152        kinfo->spi_nctxts = dd->cfgctxts;
 153        /* unit (chip/board) our context is on */
 154        kinfo->spi_unit = dd->unit;
 155        kinfo->spi_port = ppd->port;
 156        /* for now, only a single page */
 157        kinfo->spi_tid_maxsize = PAGE_SIZE;
 158
 159        /*
 160         * Doing this per context, and based on the skip value, etc.  This has
 161         * to be the actual buffer size, since the protocol code treats it
 162         * as an array.
 163         *
 164         * These have to be set to user addresses in the user code via mmap.
 165         * These values are used on return to user code for the mmap target
 166         * addresses only.  For 32 bit, same 44 bit address problem, so use
 167         * the physical address, not virtual.  Before 2.6.11, using the
 168         * page_address() macro worked, but in 2.6.11, even that returns the
 169         * full 64 bit address (upper bits all 1's).  So far, using the
 170         * physical addresses (or chip offsets, for chip mapping) works, but
 171         * no doubt some future kernel release will change that, and we'll be
 172         * on to yet another method of dealing with this.
 173         * Normally only one of rcvhdr_tailaddr or rhf_offset is useful
 174         * since the chips with non-zero rhf_offset don't normally
 175         * enable tail register updates to host memory, but for testing,
 176         * both can be enabled and used.
 177         */
 178        kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys;
 179        kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys;
 180        kinfo->spi_rhf_offset = dd->rhf_offset;
 181        kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys;
 182        kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
 183        /* setup per-unit (not port) status area for user programs */
 184        kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
 185                (char *) ppd->statusp -
 186                (char *) dd->pioavailregs_dma;
 187        kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
 188        if (!shared) {
 189                kinfo->spi_piocnt = rcd->piocnt;
 190                kinfo->spi_piobufbase = (u64) rcd->piobufs;
 191                kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask);
 192        } else if (master) {
 193                kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) +
 194                                    (rcd->piocnt % subctxt_cnt);
  195                /* Master's PIO buffers are after all the slaves' */
 196                kinfo->spi_piobufbase = (u64) rcd->piobufs +
 197                        dd->palign *
 198                        (rcd->piocnt - kinfo->spi_piocnt);
 199        } else {
 200                unsigned slave = subctxt_fp(fp) - 1;
 201
 202                kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt;
 203                kinfo->spi_piobufbase = (u64) rcd->piobufs +
 204                        dd->palign * kinfo->spi_piocnt * slave;
 205        }
 206
 207        if (shared) {
 208                kinfo->spi_sendbuf_status =
 209                        cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]);
 210                /* only spi_subctxt_* fields should be set in this block! */
 211                kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase);
 212
 213                kinfo->spi_subctxt_rcvegrbuf =
 214                        cvt_kvaddr(rcd->subctxt_rcvegrbuf);
 215                kinfo->spi_subctxt_rcvhdr_base =
 216                        cvt_kvaddr(rcd->subctxt_rcvhdr_base);
 217        }
 218
 219        /*
 220         * All user buffers are 2KB buffers.  If we ever support
 221         * giving 4KB buffers to user processes, this will need some
 222         * work.  Can't use piobufbase directly, because it has
 223         * both 2K and 4K buffer base values.
 224         */
 225        kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
 226                dd->palign;
 227        kinfo->spi_pioalign = dd->palign;
 228        kinfo->spi_qpair = QIB_KD_QP;
 229        /*
 230         * user mode PIO buffers are always 2KB, even when 4KB can
 231         * be received, and sent via the kernel; this is ibmaxlen
 232         * for 2K MTU.
 233         */
 234        kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
 235        kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */
 236        kinfo->spi_ctxt = rcd->ctxt;
 237        kinfo->spi_subctxt = subctxt_fp(fp);
 238        kinfo->spi_sw_version = QIB_KERN_SWVERSION;
 239        kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */
 240        kinfo->spi_hw_version = dd->revision;
 241
 242        if (master)
 243                kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER;
 244
 245        sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
 246        if (copy_to_user(ubase, kinfo, sz))
 247                ret = -EFAULT;
 248bail:
 249        kfree(kinfo);
 250        return ret;
 251}
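/*
 * Illustrative sketch of the consumer side (assumed userspace code, not part
 * of the driver): the spi_* addresses returned above are used verbatim as
 * mmap() offsets on the same fd, and qib_mmapf() below matches them back to
 * the corresponding region, e.g.:
 *
 *	struct qib_base_info binfo;	// filled in via qib_get_base_info()
 *	size_t pg_sz = getpagesize();	// pioavail copy is one page
 *	volatile __u64 *pioavail =
 *		mmap(NULL, pg_sz, PROT_READ, MAP_SHARED, fd,
 *		     binfo.spi_pioavailaddr);
 *
 * MAP_SHARED is mandatory (qib_mmapf() rejects private mappings) and the
 * pioavail copy must be mapped read-only, as enforced by qib_mmap_mem().
 */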
 252
 253/**
 254 * qib_tid_update - update a context TID
 255 * @rcd: the context
 256 * @fp: the qib device file
 257 * @ti: the TID information
 258 *
 259 * The new implementation as of Oct 2004 is that the driver assigns
 260 * the tid and returns it to the caller.   To reduce search time, we
 261 * keep a cursor for each context, walking the shadow tid array to find
 262 * one that's not in use.
 263 *
 264 * For now, if we can't allocate the full list, we fail, although
 265 * in the long run, we'll allocate as many as we can, and the
 266 * caller will deal with that by trying the remaining pages later.
 267 * That means that when we fail, we have to mark the tids as not in
 268 * use again, in our shadow copy.
 269 *
 270 * It's up to the caller to free the tids when they are done.
 271 * We'll unlock the pages as they free them.
 272 *
 273 * Also, right now we are locking one page at a time, but since
 274 * the intended use of this routine is for a single group of
 275 * virtually contiguous pages, that should change to improve
 276 * performance.
 277 */
 278static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
 279                          const struct qib_tid_info *ti)
 280{
 281        int ret = 0, ntids;
 282        u32 tid, ctxttid, cnt, i, tidcnt, tidoff;
 283        u16 *tidlist;
 284        struct qib_devdata *dd = rcd->dd;
 285        u64 physaddr;
 286        unsigned long vaddr;
 287        u64 __iomem *tidbase;
 288        unsigned long tidmap[8];
 289        struct page **pagep = NULL;
 290        unsigned subctxt = subctxt_fp(fp);
 291
 292        if (!dd->pageshadow) {
 293                ret = -ENOMEM;
 294                goto done;
 295        }
 296
 297        cnt = ti->tidcnt;
 298        if (!cnt) {
 299                ret = -EFAULT;
 300                goto done;
 301        }
 302        ctxttid = rcd->ctxt * dd->rcvtidcnt;
 303        if (!rcd->subctxt_cnt) {
 304                tidcnt = dd->rcvtidcnt;
 305                tid = rcd->tidcursor;
 306                tidoff = 0;
 307        } else if (!subctxt) {
 308                tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
 309                         (dd->rcvtidcnt % rcd->subctxt_cnt);
 310                tidoff = dd->rcvtidcnt - tidcnt;
 311                ctxttid += tidoff;
 312                tid = tidcursor_fp(fp);
 313        } else {
 314                tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
 315                tidoff = tidcnt * (subctxt - 1);
 316                ctxttid += tidoff;
 317                tid = tidcursor_fp(fp);
 318        }
 319        if (cnt > tidcnt) {
 320                /* make sure it all fits in tid_pg_list */
 321                qib_devinfo(dd->pcidev,
 322                        "Process tried to allocate %u TIDs, only trying max (%u)\n",
 323                        cnt, tidcnt);
 324                cnt = tidcnt;
 325        }
 326        pagep = (struct page **) rcd->tid_pg_list;
 327        tidlist = (u16 *) &pagep[dd->rcvtidcnt];
 328        pagep += tidoff;
 329        tidlist += tidoff;
 330
 331        memset(tidmap, 0, sizeof(tidmap));
 332        /* before decrement; chip actual # */
 333        ntids = tidcnt;
 334        tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
 335                                   dd->rcvtidbase +
 336                                   ctxttid * sizeof(*tidbase));
 337
 338        /* virtual address of first page in transfer */
 339        vaddr = ti->tidvaddr;
 340        if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
 341                       cnt * PAGE_SIZE)) {
 342                ret = -EFAULT;
 343                goto done;
 344        }
 345        ret = qib_get_user_pages(vaddr, cnt, pagep);
 346        if (ret) {
 347                /*
 348                 * if (ret == -EBUSY)
 349                 * We can't continue because the pagep array won't be
 350                 * initialized. This should never happen,
 351                 * unless perhaps the user has mpin'ed the pages
 352                 * themselves.
 353                 */
  354                qib_devinfo(dd->pcidev,
  355                        "Failed to lock addr %p, %u pages: errno %d\n",
  356                        (void *) vaddr, cnt, -ret);
 357                goto done;
 358        }
 359        for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
 360                for (; ntids--; tid++) {
 361                        if (tid == tidcnt)
 362                                tid = 0;
 363                        if (!dd->pageshadow[ctxttid + tid])
 364                                break;
 365                }
 366                if (ntids < 0) {
 367                        /*
 368                         * Oops, wrapped all the way through their TIDs,
 369                         * and didn't have enough free; see comments at
 370                         * start of routine
 371                         */
 372                        i--;    /* last tidlist[i] not filled in */
 373                        ret = -ENOMEM;
 374                        break;
 375                }
 376                tidlist[i] = tid + tidoff;
  377                /* we "know" system pages and TID pages are the same size */
 378                dd->pageshadow[ctxttid + tid] = pagep[i];
 379                dd->physshadow[ctxttid + tid] =
 380                        qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
 381                                     PCI_DMA_FROMDEVICE);
 382                /*
  383                 * don't need atomic or its overhead
 384                 */
 385                __set_bit(tid, tidmap);
 386                physaddr = dd->physshadow[ctxttid + tid];
 387                /* PERFORMANCE: below should almost certainly be cached */
 388                dd->f_put_tid(dd, &tidbase[tid],
 389                                  RCVHQ_RCV_TYPE_EXPECTED, physaddr);
 390                /*
 391                 * don't check this tid in qib_ctxtshadow, since we
 392                 * just filled it in; start with the next one.
 393                 */
 394                tid++;
 395        }
 396
 397        if (ret) {
 398                u32 limit;
 399cleanup:
 400                /* jump here if copy out of updated info failed... */
 401                /* same code that's in qib_free_tid() */
 402                limit = sizeof(tidmap) * BITS_PER_BYTE;
 403                if (limit > tidcnt)
 404                        /* just in case size changes in future */
 405                        limit = tidcnt;
 406                tid = find_first_bit((const unsigned long *)tidmap, limit);
 407                for (; tid < limit; tid++) {
 408                        if (!test_bit(tid, tidmap))
 409                                continue;
 410                        if (dd->pageshadow[ctxttid + tid]) {
 411                                dma_addr_t phys;
 412
 413                                phys = dd->physshadow[ctxttid + tid];
 414                                dd->physshadow[ctxttid + tid] = dd->tidinvalid;
 415                                /* PERFORMANCE: below should almost certainly
 416                                 * be cached
 417                                 */
 418                                dd->f_put_tid(dd, &tidbase[tid],
 419                                              RCVHQ_RCV_TYPE_EXPECTED,
 420                                              dd->tidinvalid);
 421                                pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
 422                                               PCI_DMA_FROMDEVICE);
 423                                dd->pageshadow[ctxttid + tid] = NULL;
 424                        }
 425                }
 426                qib_release_user_pages(pagep, cnt);
 427        } else {
 428                /*
 429                 * Copy the updated array, with qib_tid's filled in, back
  430                 * to user.  Since we did the copy in already, this "should
  431                 * never fail".  If it does, we have to clean up...
 432                 */
 433                if (copy_to_user((void __user *)
 434                                 (unsigned long) ti->tidlist,
 435                                 tidlist, cnt * sizeof(*tidlist))) {
 436                        ret = -EFAULT;
 437                        goto cleanup;
 438                }
 439                if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
  440                                 tidmap, sizeof(tidmap))) {
 441                        ret = -EFAULT;
 442                        goto cleanup;
 443                }
 444                if (tid == tidcnt)
 445                        tid = 0;
 446                if (!rcd->subctxt_cnt)
 447                        rcd->tidcursor = tid;
 448                else
 449                        tidcursor_fp(fp) = tid;
 450        }
 451
 452done:
 453        return ret;
 454}
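/*
 * Illustrative sketch of the expected-receive handshake (assumed userspace
 * usage, not part of the driver): the caller describes a buffer to be pinned
 * and two result arrays, and the driver chooses the TIDs:
 *
 *	struct qib_tid_info ti = {
 *		.tidcnt   = npages,
 *		.tidvaddr = (__u64) (unsigned long) buf,     // page aligned
 *		.tidlist  = (__u64) (unsigned long) tidlist, // filled by driver
 *		.tidmap   = (__u64) (unsigned long) tidmap,  // filled by driver
 *	};
 *
 * On success tidlist[] holds the TID indices to put in send headers, and
 * tidmap is the bitmap that must be handed back, unchanged, to the free
 * path so qib_tid_free() can invalidate and unpin exactly those entries.
 */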
 455
 456/**
 457 * qib_tid_free - free a context TID
 458 * @rcd: the context
 459 * @subctxt: the subcontext
 460 * @ti: the TID info
 461 *
  462 * Right now we are unlocking one page at a time, but since
 463 * the intended use of this routine is for a single group of
 464 * virtually contiguous pages, that should change to improve
 465 * performance.  We check that the TID is in range for this context
 466 * but otherwise don't check validity; if user has an error and
 467 * frees the wrong tid, it's only their own data that can thereby
  468 * be corrupted.  We do check that the TID was in use, for sanity.
 469 * We always use our idea of the saved address, not the address that
 470 * they pass in to us.
 471 */
 472static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
 473                        const struct qib_tid_info *ti)
 474{
 475        int ret = 0;
 476        u32 tid, ctxttid, cnt, limit, tidcnt;
 477        struct qib_devdata *dd = rcd->dd;
 478        u64 __iomem *tidbase;
 479        unsigned long tidmap[8];
 480
 481        if (!dd->pageshadow) {
 482                ret = -ENOMEM;
 483                goto done;
 484        }
 485
 486        if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
  487                           sizeof(tidmap))) {
 488                ret = -EFAULT;
 489                goto done;
 490        }
 491
 492        ctxttid = rcd->ctxt * dd->rcvtidcnt;
 493        if (!rcd->subctxt_cnt)
 494                tidcnt = dd->rcvtidcnt;
 495        else if (!subctxt) {
 496                tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
 497                         (dd->rcvtidcnt % rcd->subctxt_cnt);
 498                ctxttid += dd->rcvtidcnt - tidcnt;
 499        } else {
 500                tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
 501                ctxttid += tidcnt * (subctxt - 1);
 502        }
 503        tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
 504                                   dd->rcvtidbase +
 505                                   ctxttid * sizeof(*tidbase));
 506
 507        limit = sizeof(tidmap) * BITS_PER_BYTE;
 508        if (limit > tidcnt)
 509                /* just in case size changes in future */
 510                limit = tidcnt;
 511        tid = find_first_bit(tidmap, limit);
 512        for (cnt = 0; tid < limit; tid++) {
 513                /*
 514                 * small optimization; if we detect a run of 3 or so without
 515                 * any set, use find_first_bit again.  That's mainly to
 516                 * accelerate the case where we wrapped, so we have some at
 517                 * the beginning, and some at the end, and a big gap
 518                 * in the middle.
 519                 */
 520                if (!test_bit(tid, tidmap))
 521                        continue;
 522                cnt++;
 523                if (dd->pageshadow[ctxttid + tid]) {
 524                        struct page *p;
 525                        dma_addr_t phys;
 526
 527                        p = dd->pageshadow[ctxttid + tid];
 528                        dd->pageshadow[ctxttid + tid] = NULL;
 529                        phys = dd->physshadow[ctxttid + tid];
 530                        dd->physshadow[ctxttid + tid] = dd->tidinvalid;
 531                        /* PERFORMANCE: below should almost certainly be
 532                         * cached
 533                         */
 534                        dd->f_put_tid(dd, &tidbase[tid],
 535                                      RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
 536                        pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
 537                                       PCI_DMA_FROMDEVICE);
 538                        qib_release_user_pages(&p, 1);
 539                }
 540        }
 541done:
 542        return ret;
 543}
 544
 545/**
 546 * qib_set_part_key - set a partition key
 547 * @rcd: the context
 548 * @key: the key
 549 *
 550 * We can have up to 4 active at a time (other than the default, which is
 551 * always allowed).  This is somewhat tricky, since multiple contexts may set
 552 * the same key, so we reference count them, and clean up at exit.  All 4
 553 * partition keys are packed into a single qlogic_ib register.  It's an
 554 * error for a process to set the same pkey multiple times.  We provide no
  555 * mechanism to de-allocate a pkey at this time; we may eventually need to
 556 * do that.  I've used the atomic operations, and no locking, and only make
 557 * a single pass through what's available.  This should be more than
 558 * adequate for some time. I'll think about spinlocks or the like if and as
 559 * it's necessary.
 560 */
 561static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
 562{
 563        struct qib_pportdata *ppd = rcd->ppd;
 564        int i, any = 0, pidx = -1;
 565        u16 lkey = key & 0x7FFF;
 566        int ret;
 567
 568        if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF)) {
  569                /* nothing to do; this key is always valid */
 570                ret = 0;
 571                goto bail;
 572        }
 573
 574        if (!lkey) {
 575                ret = -EINVAL;
 576                goto bail;
 577        }
 578
 579        /*
 580         * Set the full membership bit, because it has to be
 581         * set in the register or the packet, and it seems
 582         * cleaner to set in the register than to force all
 583         * callers to set it.
 584         */
 585        key |= 0x8000;
 586
 587        for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
 588                if (!rcd->pkeys[i] && pidx == -1)
 589                        pidx = i;
 590                if (rcd->pkeys[i] == key) {
 591                        ret = -EEXIST;
 592                        goto bail;
 593                }
 594        }
 595        if (pidx == -1) {
 596                ret = -EBUSY;
 597                goto bail;
 598        }
 599        for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
 600                if (!ppd->pkeys[i]) {
 601                        any++;
 602                        continue;
 603                }
 604                if (ppd->pkeys[i] == key) {
 605                        atomic_t *pkrefs = &ppd->pkeyrefs[i];
 606
 607                        if (atomic_inc_return(pkrefs) > 1) {
 608                                rcd->pkeys[pidx] = key;
 609                                ret = 0;
 610                                goto bail;
 611                        } else {
 612                                /*
 613                                 * lost race, decrement count, catch below
 614                                 */
 615                                atomic_dec(pkrefs);
 616                                any++;
 617                        }
 618                }
 619                if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
 620                        /*
 621                         * It makes no sense to have both the limited and
 622                         * full membership PKEY set at the same time since
 623                         * the unlimited one will disable the limited one.
 624                         */
 625                        ret = -EEXIST;
 626                        goto bail;
 627                }
 628        }
 629        if (!any) {
 630                ret = -EBUSY;
 631                goto bail;
 632        }
 633        for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
 634                if (!ppd->pkeys[i] &&
 635                    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
 636                        rcd->pkeys[pidx] = key;
 637                        ppd->pkeys[i] = key;
 638                        (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
 639                        ret = 0;
 640                        goto bail;
 641                }
 642        }
 643        ret = -EBUSY;
 644
 645bail:
 646        return ret;
 647}
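/*
 * Worked example for the masking above: a request for the limited-membership
 * key 0x0001 and one for the full-membership key 0x8001 both reduce to
 * lkey == 0x0001, and the value actually stored in ppd->pkeys[] and written
 * to the chip is always the full-membership form, 0x8001.
 */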
 648
 649/**
 650 * qib_manage_rcvq - manage a context's receive queue
 651 * @rcd: the context
 652 * @subctxt: the subcontext
 653 * @start_stop: action to carry out
 654 *
 655 * start_stop == 0 disables receive on the context, for use in queue
  656 * overflow conditions.  start_stop == 1 re-enables, to be used to
  657 * re-init the software copy of the head register.
 658 */
 659static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,
 660                           int start_stop)
 661{
 662        struct qib_devdata *dd = rcd->dd;
 663        unsigned int rcvctrl_op;
 664
 665        if (subctxt)
 666                goto bail;
 667        /* atomically clear receive enable ctxt. */
 668        if (start_stop) {
 669                /*
 670                 * On enable, force in-memory copy of the tail register to
 671                 * 0, so that protocol code doesn't have to worry about
  672         * whether or not the chip has yet updated the in-memory
  673         * copy on return from the system call.  The chip
  674         * always resets its tail register back to 0 on a
 675                 * transition from disabled to enabled.
 676                 */
 677                if (rcd->rcvhdrtail_kvaddr)
 678                        qib_clear_rcvhdrtail(rcd);
 679                rcvctrl_op = QIB_RCVCTRL_CTXT_ENB;
 680        } else
 681                rcvctrl_op = QIB_RCVCTRL_CTXT_DIS;
 682        dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
 683        /* always; new head should be equal to new tail; see above */
 684bail:
 685        return 0;
 686}
 687
 688static void qib_clean_part_key(struct qib_ctxtdata *rcd,
 689                               struct qib_devdata *dd)
 690{
 691        int i, j, pchanged = 0;
 692        u64 oldpkey;
 693        struct qib_pportdata *ppd = rcd->ppd;
 694
 695        /* for debugging only */
 696        oldpkey = (u64) ppd->pkeys[0] |
 697                ((u64) ppd->pkeys[1] << 16) |
 698                ((u64) ppd->pkeys[2] << 32) |
 699                ((u64) ppd->pkeys[3] << 48);
 700
 701        for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
 702                if (!rcd->pkeys[i])
 703                        continue;
 704                for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) {
 705                        /* check for match independent of the global bit */
 706                        if ((ppd->pkeys[j] & 0x7fff) !=
 707                            (rcd->pkeys[i] & 0x7fff))
 708                                continue;
 709                        if (atomic_dec_and_test(&ppd->pkeyrefs[j])) {
 710                                ppd->pkeys[j] = 0;
 711                                pchanged++;
 712                        }
 713                        break;
 714                }
 715                rcd->pkeys[i] = 0;
 716        }
 717        if (pchanged)
 718                (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
 719}
 720
 721/* common code for the mappings on dma_alloc_coherent mem */
 722static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
 723                        unsigned len, void *kvaddr, u32 write_ok, char *what)
 724{
 725        struct qib_devdata *dd = rcd->dd;
 726        unsigned long pfn;
 727        int ret;
 728
 729        if ((vma->vm_end - vma->vm_start) > len) {
 730                qib_devinfo(dd->pcidev,
 731                         "FAIL on %s: len %lx > %x\n", what,
 732                         vma->vm_end - vma->vm_start, len);
 733                ret = -EFAULT;
 734                goto bail;
 735        }
 736
 737        /*
  738         * shared context user code requires rcvhdrq mapped r/w; others
  739         * are only allowed a read-only mapping.
 740         */
 741        if (!write_ok) {
 742                if (vma->vm_flags & VM_WRITE) {
 743                        qib_devinfo(dd->pcidev,
 744                                 "%s must be mapped readonly\n", what);
 745                        ret = -EPERM;
 746                        goto bail;
 747                }
 748
 749                /* don't allow them to later change with mprotect */
 750                vma->vm_flags &= ~VM_MAYWRITE;
 751        }
 752
 753        pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
 754        ret = remap_pfn_range(vma, vma->vm_start, pfn,
 755                              len, vma->vm_page_prot);
 756        if (ret)
 757                qib_devinfo(dd->pcidev,
 758                        "%s ctxt%u mmap of %lx, %x bytes failed: %d\n",
 759                        what, rcd->ctxt, pfn, len, ret);
 760bail:
 761        return ret;
 762}
 763
 764static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
 765                     u64 ureg)
 766{
 767        unsigned long phys;
 768        unsigned long sz;
 769        int ret;
 770
 771        /*
 772         * This is real hardware, so use io_remap.  This is the mechanism
 773         * for the user process to update the head registers for their ctxt
 774         * in the chip.
 775         */
 776        sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
 777        if ((vma->vm_end - vma->vm_start) > sz) {
 778                qib_devinfo(dd->pcidev,
 779                        "FAIL mmap userreg: reqlen %lx > PAGE\n",
 780                        vma->vm_end - vma->vm_start);
 781                ret = -EFAULT;
 782        } else {
 783                phys = dd->physaddr + ureg;
 784                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 785
 786                vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
 787                ret = io_remap_pfn_range(vma, vma->vm_start,
 788                                         phys >> PAGE_SHIFT,
 789                                         vma->vm_end - vma->vm_start,
 790                                         vma->vm_page_prot);
 791        }
 792        return ret;
 793}
 794
 795static int mmap_piobufs(struct vm_area_struct *vma,
 796                        struct qib_devdata *dd,
 797                        struct qib_ctxtdata *rcd,
 798                        unsigned piobufs, unsigned piocnt)
 799{
 800        unsigned long phys;
 801        int ret;
 802
 803        /*
 804         * When we map the PIO buffers in the chip, we want to map them as
 805         * writeonly, no read possible; unfortunately, x86 doesn't allow
 806         * for this in hardware, but we still prevent users from asking
 807         * for it.
 808         */
 809        if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
 810                qib_devinfo(dd->pcidev,
 811                        "FAIL mmap piobufs: reqlen %lx > PAGE\n",
 812                         vma->vm_end - vma->vm_start);
 813                ret = -EINVAL;
 814                goto bail;
 815        }
 816
 817        phys = dd->physaddr + piobufs;
 818
 819#if defined(__powerpc__)
 820        /* There isn't a generic way to specify writethrough mappings */
 821        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
 822        pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
 823        pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
 824#endif
 825
 826        /*
 827         * don't allow them to later change to readable with mprotect (for when
 828         * not initially mapped readable, as is normally the case)
 829         */
 830        vma->vm_flags &= ~VM_MAYREAD;
 831        vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
 832
 833        if (qib_wc_pat)
 834                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 835
 836        ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
 837                                 vma->vm_end - vma->vm_start,
 838                                 vma->vm_page_prot);
 839bail:
 840        return ret;
 841}
 842
 843static int mmap_rcvegrbufs(struct vm_area_struct *vma,
 844                           struct qib_ctxtdata *rcd)
 845{
 846        struct qib_devdata *dd = rcd->dd;
 847        unsigned long start, size;
 848        size_t total_size, i;
 849        unsigned long pfn;
 850        int ret;
 851
 852        size = rcd->rcvegrbuf_size;
 853        total_size = rcd->rcvegrbuf_chunks * size;
 854        if ((vma->vm_end - vma->vm_start) > total_size) {
 855                qib_devinfo(dd->pcidev,
 856                        "FAIL on egr bufs: reqlen %lx > actual %lx\n",
 857                         vma->vm_end - vma->vm_start,
 858                         (unsigned long) total_size);
 859                ret = -EINVAL;
 860                goto bail;
 861        }
 862
 863        if (vma->vm_flags & VM_WRITE) {
 864                qib_devinfo(dd->pcidev,
 865                        "Can't map eager buffers as writable (flags=%lx)\n",
 866                        vma->vm_flags);
 867                ret = -EPERM;
 868                goto bail;
 869        }
 870        /* don't allow them to later change to writeable with mprotect */
 871        vma->vm_flags &= ~VM_MAYWRITE;
 872
 873        start = vma->vm_start;
 874
 875        for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) {
 876                pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
 877                ret = remap_pfn_range(vma, start, pfn, size,
 878                                      vma->vm_page_prot);
 879                if (ret < 0)
 880                        goto bail;
 881        }
 882        ret = 0;
 883
 884bail:
 885        return ret;
 886}
 887
 888/*
 889 * qib_file_vma_fault - handle a VMA page fault.
 890 */
 891static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 892{
 893        struct page *page;
 894
 895        page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
 896        if (!page)
 897                return VM_FAULT_SIGBUS;
 898
 899        get_page(page);
 900        vmf->page = page;
 901
 902        return 0;
 903}
 904
 905static struct vm_operations_struct qib_file_vm_ops = {
 906        .fault = qib_file_vma_fault,
 907};
 908
 909static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
 910                       struct qib_ctxtdata *rcd, unsigned subctxt)
 911{
 912        struct qib_devdata *dd = rcd->dd;
 913        unsigned subctxt_cnt;
 914        unsigned long len;
 915        void *addr;
 916        size_t size;
 917        int ret = 0;
 918
 919        subctxt_cnt = rcd->subctxt_cnt;
 920        size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
 921
 922        /*
 923         * Each process has all the subctxt uregbase, rcvhdrq, and
 924         * rcvegrbufs mmapped - as an array for all the processes,
 925         * and also separately for this process.
 926         */
 927        if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) {
 928                addr = rcd->subctxt_uregbase;
 929                size = PAGE_SIZE * subctxt_cnt;
 930        } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) {
 931                addr = rcd->subctxt_rcvhdr_base;
 932                size = rcd->rcvhdrq_size * subctxt_cnt;
 933        } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) {
 934                addr = rcd->subctxt_rcvegrbuf;
 935                size *= subctxt_cnt;
 936        } else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase +
 937                                        PAGE_SIZE * subctxt)) {
 938                addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt;
 939                size = PAGE_SIZE;
 940        } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base +
 941                                        rcd->rcvhdrq_size * subctxt)) {
 942                addr = rcd->subctxt_rcvhdr_base +
 943                        rcd->rcvhdrq_size * subctxt;
 944                size = rcd->rcvhdrq_size;
 945        } else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) {
 946                addr = rcd->user_event_mask;
 947                size = PAGE_SIZE;
 948        } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf +
 949                                        size * subctxt)) {
 950                addr = rcd->subctxt_rcvegrbuf + size * subctxt;
 951                /* rcvegrbufs are read-only on the slave */
 952                if (vma->vm_flags & VM_WRITE) {
 953                        qib_devinfo(dd->pcidev,
  954                                 "Can't map eager buffers as writable (flags=%lx)\n",
  955                                 vma->vm_flags);
 956                        ret = -EPERM;
 957                        goto bail;
 958                }
 959                /*
 960                 * Don't allow permission to later change to writeable
 961                 * with mprotect.
 962                 */
 963                vma->vm_flags &= ~VM_MAYWRITE;
 964        } else
 965                goto bail;
 966        len = vma->vm_end - vma->vm_start;
 967        if (len > size) {
 968                ret = -EINVAL;
 969                goto bail;
 970        }
 971
 972        vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
 973        vma->vm_ops = &qib_file_vm_ops;
 974        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 975        ret = 1;
 976
 977bail:
 978        return ret;
 979}
 980
 981/**
 982 * qib_mmapf - mmap various structures into user space
 983 * @fp: the file pointer
 984 * @vma: the VM area
 985 *
 986 * We use this to have a shared buffer between the kernel and the user code
 987 * for the rcvhdr queue, egr buffers, and the per-context user regs and pio
 988 * buffers in the chip.  We have the open and close entries so we can bump
 989 * the ref count and keep the driver from being unloaded while still mapped.
 990 */
 991static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
 992{
 993        struct qib_ctxtdata *rcd;
 994        struct qib_devdata *dd;
 995        u64 pgaddr, ureg;
 996        unsigned piobufs, piocnt;
 997        int ret, match = 1;
 998
 999        rcd = ctxt_fp(fp);
1000        if (!rcd || !(vma->vm_flags & VM_SHARED)) {
1001                ret = -EINVAL;
1002                goto bail;
1003        }
1004        dd = rcd->dd;
1005
1006        /*
1007         * This is the qib_do_user_init() code, mapping the shared buffers
1008         * and per-context user registers into the user process. The address
1009         * referred to by vm_pgoff is the file offset passed via mmap().
1010         * For shared contexts, this is the kernel vmalloc() address of the
1011         * pages to share with the master.
1012         * For non-shared or master ctxts, this is a physical address.
1013         * We only do one mmap for each space mapped.
1014         */
1015        pgaddr = vma->vm_pgoff << PAGE_SHIFT;
1016
1017        /*
1018         * Check for 0 in case one of the allocations failed, but user
1019         * called mmap anyway.
1020         */
1021        if (!pgaddr)  {
1022                ret = -EINVAL;
1023                goto bail;
1024        }
1025
1026        /*
1027         * Physical addresses must fit in 40 bits for our hardware.
1028         * Check for kernel virtual addresses first, anything else must
1029         * match a HW or memory address.
1030         */
1031        ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));
1032        if (ret) {
1033                if (ret > 0)
1034                        ret = 0;
1035                goto bail;
1036        }
1037
1038        ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
1039        if (!rcd->subctxt_cnt) {
1040                /* ctxt is not shared */
1041                piocnt = rcd->piocnt;
1042                piobufs = rcd->piobufs;
1043        } else if (!subctxt_fp(fp)) {
1044                /* caller is the master */
1045                piocnt = (rcd->piocnt / rcd->subctxt_cnt) +
1046                         (rcd->piocnt % rcd->subctxt_cnt);
1047                piobufs = rcd->piobufs +
1048                        dd->palign * (rcd->piocnt - piocnt);
1049        } else {
1050                unsigned slave = subctxt_fp(fp) - 1;
1051
1052                /* caller is a slave */
1053                piocnt = rcd->piocnt / rcd->subctxt_cnt;
1054                piobufs = rcd->piobufs + dd->palign * piocnt * slave;
1055        }
1056
1057        if (pgaddr == ureg)
1058                ret = mmap_ureg(vma, dd, ureg);
1059        else if (pgaddr == piobufs)
1060                ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
1061        else if (pgaddr == dd->pioavailregs_phys)
1062                /* in-memory copy of pioavail registers */
1063                ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
1064                                   (void *) dd->pioavailregs_dma, 0,
1065                                   "pioavail registers");
1066        else if (pgaddr == rcd->rcvegr_phys)
1067                ret = mmap_rcvegrbufs(vma, rcd);
1068        else if (pgaddr == (u64) rcd->rcvhdrq_phys)
1069                /*
1070                 * The rcvhdrq itself; multiple pages, contiguous
1071                 * from an i/o perspective.  Shared contexts need
1072                 * to map r/w, so we allow writing.
1073                 */
1074                ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
1075                                   rcd->rcvhdrq, 1, "rcvhdrq");
1076        else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys)
1077                /* in-memory copy of rcvhdrq tail register */
1078                ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
1079                                   rcd->rcvhdrtail_kvaddr, 0,
1080                                   "rcvhdrq tail");
1081        else
1082                match = 0;
1083        if (!match)
1084                ret = -EINVAL;
1085
1086        vma->vm_private_data = NULL;
1087
1088        if (ret < 0)
1089                qib_devinfo(dd->pcidev,
1090                         "mmap Failure %d: off %llx len %lx\n",
1091                         -ret, (unsigned long long)pgaddr,
1092                         vma->vm_end - vma->vm_start);
1093bail:
1094        return ret;
1095}
1096
1097static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
1098                                    struct file *fp,
1099                                    struct poll_table_struct *pt)
1100{
1101        struct qib_devdata *dd = rcd->dd;
1102        unsigned pollflag;
1103
1104        poll_wait(fp, &rcd->wait, pt);
1105
1106        spin_lock_irq(&dd->uctxt_lock);
1107        if (rcd->urgent != rcd->urgent_poll) {
1108                pollflag = POLLIN | POLLRDNORM;
1109                rcd->urgent_poll = rcd->urgent;
1110        } else {
1111                pollflag = 0;
1112                set_bit(QIB_CTXT_WAITING_URG, &rcd->flag);
1113        }
1114        spin_unlock_irq(&dd->uctxt_lock);
1115
1116        return pollflag;
1117}
1118
1119static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
1120                                  struct file *fp,
1121                                  struct poll_table_struct *pt)
1122{
1123        struct qib_devdata *dd = rcd->dd;
1124        unsigned pollflag;
1125
1126        poll_wait(fp, &rcd->wait, pt);
1127
1128        spin_lock_irq(&dd->uctxt_lock);
1129        if (dd->f_hdrqempty(rcd)) {
1130                set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag);
1131                dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
1132                pollflag = 0;
1133        } else
1134                pollflag = POLLIN | POLLRDNORM;
1135        spin_unlock_irq(&dd->uctxt_lock);
1136
1137        return pollflag;
1138}
1139
1140static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
1141{
1142        struct qib_ctxtdata *rcd;
1143        unsigned pollflag;
1144
1145        rcd = ctxt_fp(fp);
1146        if (!rcd)
1147                pollflag = POLLERR;
1148        else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
1149                pollflag = qib_poll_urgent(rcd, fp, pt);
1150        else  if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
1151                pollflag = qib_poll_next(rcd, fp, pt);
1152        else /* invalid */
1153                pollflag = POLLERR;
1154
1155        return pollflag;
1156}
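/*
 * Illustrative sketch (assumed userspace usage): once the context's
 * poll_type has been chosen, waiting for receive work is an ordinary
 * poll() on the device fd:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & POLLIN))
 *		drain_rcvhdrq();	(hypothetical library helper)
 *
 * POLLERR is reported when no context is attached to the fd or the poll
 * type is invalid, as in qib_poll() above.
 */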
1157
1158/*
1159 * Check that userland and driver are compatible for subcontexts.
1160 */
1161static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
1162{
1163        /* this code is written long-hand for clarity */
1164        if (QIB_USER_SWMAJOR != user_swmajor) {
1165                /* no promise of compatibility if major mismatch */
1166                return 0;
1167        }
1168        if (QIB_USER_SWMAJOR == 1) {
1169                switch (QIB_USER_SWMINOR) {
1170                case 0:
1171                case 1:
1172                case 2:
1173                        /* no subctxt implementation so cannot be compatible */
1174                        return 0;
1175                case 3:
1176                        /* 3 is only compatible with itself */
1177                        return user_swminor == 3;
1178                default:
1179                        /* >= 4 are compatible (or are expected to be) */
1180                        return user_swminor >= 4;
1181                }
1182        }
1183        /* make no promises yet for future major versions */
1184        return 0;
1185}
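/*
 * Example of the policy above, assuming QIB_USER_SWMINOR is 4 or greater:
 * a library reporting minor 3 is rejected (3 only pairs with 3), any
 * library reporting minor >= 4 is accepted, and a major-version mismatch
 * is always rejected regardless of minor.
 */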
1186
1187static int init_subctxts(struct qib_devdata *dd,
1188                         struct qib_ctxtdata *rcd,
1189                         const struct qib_user_info *uinfo)
1190{
1191        int ret = 0;
1192        unsigned num_subctxts;
1193        size_t size;
1194
1195        /*
1196         * If the user is requesting zero subctxts,
1197         * skip the subctxt allocation.
1198         */
1199        if (uinfo->spu_subctxt_cnt <= 0)
1200                goto bail;
1201        num_subctxts = uinfo->spu_subctxt_cnt;
1202
1203        /* Check for subctxt compatibility */
1204        if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
1205                uinfo->spu_userversion & 0xffff)) {
1206                qib_devinfo(dd->pcidev,
1207                         "Mismatched user version (%d.%d) and driver "
1208                         "version (%d.%d) while context sharing. Ensure "
1209                         "that driver and library are from the same "
1210                         "release.\n",
1211                         (int) (uinfo->spu_userversion >> 16),
1212                         (int) (uinfo->spu_userversion & 0xffff),
1213                         QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
1214                goto bail;
1215        }
1216        if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) {
1217                ret = -EINVAL;
1218                goto bail;
1219        }
1220
1221        rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
1222        if (!rcd->subctxt_uregbase) {
1223                ret = -ENOMEM;
1224                goto bail;
1225        }
1226        /* Note: rcd->rcvhdrq_size isn't initialized yet. */
1227        size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
1228                     sizeof(u32), PAGE_SIZE) * num_subctxts;
1229        rcd->subctxt_rcvhdr_base = vmalloc_user(size);
1230        if (!rcd->subctxt_rcvhdr_base) {
1231                ret = -ENOMEM;
1232                goto bail_ureg;
1233        }
1234
1235        rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
1236                                              rcd->rcvegrbuf_size *
1237                                              num_subctxts);
1238        if (!rcd->subctxt_rcvegrbuf) {
1239                ret = -ENOMEM;
1240                goto bail_rhdr;
1241        }
1242
1243        rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
1244        rcd->subctxt_id = uinfo->spu_subctxt_id;
1245        rcd->active_slaves = 1;
1246        rcd->redirect_seq_cnt = 1;
1247        set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
1248        goto bail;
1249
1250bail_rhdr:
1251        vfree(rcd->subctxt_rcvhdr_base);
1252bail_ureg:
1253        vfree(rcd->subctxt_uregbase);
1254        rcd->subctxt_uregbase = NULL;
1255bail:
1256        return ret;
1257}
1258
1259static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
1260                      struct file *fp, const struct qib_user_info *uinfo)
1261{
1262        struct qib_devdata *dd = ppd->dd;
1263        struct qib_ctxtdata *rcd;
1264        void *ptmp = NULL;
1265        int ret;
1266
1267        rcd = qib_create_ctxtdata(ppd, ctxt);
1268
1269        /*
1270         * Allocate memory for use in qib_tid_update() at open to
1271         * reduce cost of expected send setup per message segment
1272         */
1273        if (rcd)
1274                ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
1275                               dd->rcvtidcnt * sizeof(struct page **),
1276                               GFP_KERNEL);
1277
1278        if (!rcd || !ptmp) {
1279                qib_dev_err(dd,
1280                        "Unable to allocate ctxtdata memory, failing open\n");
1281                ret = -ENOMEM;
1282                goto bailerr;
1283        }
1284        rcd->userversion = uinfo->spu_userversion;
1285        ret = init_subctxts(dd, rcd, uinfo);
1286        if (ret)
1287                goto bailerr;
1288        rcd->tid_pg_list = ptmp;
1289        rcd->pid = current->pid;
1290        init_waitqueue_head(&dd->rcd[ctxt]->wait);
1291        strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
1292        ctxt_fp(fp) = rcd;
1293        qib_stats.sps_ctxts++;
1294        dd->freectxts--;
1295        ret = 0;
1296        goto bail;
1297
1298bailerr:
1299        dd->rcd[ctxt] = NULL;
1300        kfree(rcd);
1301        kfree(ptmp);
1302bail:
1303        return ret;
1304}
1305
1306static inline int usable(struct qib_pportdata *ppd)
1307{
1308        struct qib_devdata *dd = ppd->dd;
1309
1310        return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
1311                (ppd->lflags & QIBL_LINKACTIVE);
1312}
1313
1314/*
1315 * Select a context on the given device, either using a requested port
1316 * or the port based on the context number.
1317 */
1318static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
1319                            const struct qib_user_info *uinfo)
1320{
1321        struct qib_pportdata *ppd = NULL;
1322        int ret, ctxt;
1323
1324        if (port) {
1325                if (!usable(dd->pport + port - 1)) {
1326                        ret = -ENETDOWN;
1327                        goto done;
1328                } else
1329                        ppd = dd->pport + port - 1;
1330        }
1331        for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt];
1332             ctxt++)
1333                ;
1334        if (ctxt == dd->cfgctxts) {
1335                ret = -EBUSY;
1336                goto done;
1337        }
1338        if (!ppd) {
1339                u32 pidx = ctxt % dd->num_pports;
1340                if (usable(dd->pport + pidx))
1341                        ppd = dd->pport + pidx;
1342                else {
1343                        for (pidx = 0; pidx < dd->num_pports && !ppd;
1344                             pidx++)
1345                                if (usable(dd->pport + pidx))
1346                                        ppd = dd->pport + pidx;
1347                }
1348        }
1349        ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN;
1350done:
1351        return ret;
1352}
1353
1354static int find_free_ctxt(int unit, struct file *fp,
1355                          const struct qib_user_info *uinfo)
1356{
1357        struct qib_devdata *dd = qib_lookup(unit);
1358        int ret;
1359
1360        if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports))
1361                ret = -ENODEV;
1362        else
1363                ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo);
1364
1365        return ret;
1366}
1367
1368static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
1369                      unsigned alg)
1370{
1371        struct qib_devdata *udd = NULL;
1372        int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i;
1373        u32 port = uinfo->spu_port, ctxt;
1374
1375        devmax = qib_count_units(&npresent, &nup);
1376        if (!npresent) {
1377                ret = -ENXIO;
1378                goto done;
1379        }
1380        if (nup == 0) {
1381                ret = -ENETDOWN;
1382                goto done;
1383        }
1384
1385        if (alg == QIB_PORT_ALG_ACROSS) {
1386                unsigned inuse = ~0U;
1387                /* find device (with ACTIVE ports) with fewest ctxts in use */
1388                for (ndev = 0; ndev < devmax; ndev++) {
1389                        struct qib_devdata *dd = qib_lookup(ndev);
1390                        unsigned cused = 0, cfree = 0, pusable = 0;
1391                        if (!dd)
1392                                continue;
1393                        if (port && port <= dd->num_pports &&
1394                            usable(dd->pport + port - 1))
1395                                pusable = 1;
1396                        else
1397                                for (i = 0; i < dd->num_pports; i++)
1398                                        if (usable(dd->pport + i))
1399                                                pusable++;
1400                        if (!pusable)
1401                                continue;
1402                        for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
1403                             ctxt++)
1404                                if (dd->rcd[ctxt])
1405                                        cused++;
1406                                else
1407                                        cfree++;
1408                        if (pusable && cfree && cused < inuse) {
1409                                udd = dd;
1410                                inuse = cused;
1411                        }
1412                }
1413                if (udd) {
1414                        ret = choose_port_ctxt(fp, udd, port, uinfo);
1415                        goto done;
1416                }
1417        } else {
1418                for (ndev = 0; ndev < devmax; ndev++) {
1419                        struct qib_devdata *dd = qib_lookup(ndev);
1420                        if (dd) {
1421                                ret = choose_port_ctxt(fp, dd, port, uinfo);
1422                                if (!ret)
1423                                        goto done;
1424                                if (ret == -EBUSY)
1425                                        dusable++;
1426                        }
1427                }
1428        }
1429        ret = dusable ? -EBUSY : -ENETDOWN;
1430
1431done:
1432        return ret;
1433}
1434
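    /*
     * Look for an already-open master context whose subcontext ID matches
     * the request, and join it as a slave.  Returns 1 if a shared context
     * was found, 0 if none matched, or a negative errno if a candidate
     * matched but its sharing parameters were inconsistent.
     */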
1435static int find_shared_ctxt(struct file *fp,
1436                            const struct qib_user_info *uinfo)
1437{
1438        int devmax, ndev, i;
1439        int ret = 0;
1440
1441        devmax = qib_count_units(NULL, NULL);
1442
1443        for (ndev = 0; ndev < devmax; ndev++) {
1444                struct qib_devdata *dd = qib_lookup(ndev);
1445
1446                /* device portion of usable() */
1447                if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
1448                        continue;
1449                for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
1450                        struct qib_ctxtdata *rcd = dd->rcd[i];
1451
1452                        /* Skip ctxts which are not yet open */
1453                        if (!rcd || !rcd->cnt)
1454                                continue;
1455                        /* Skip ctxt if it doesn't match the requested one */
1456                        if (rcd->subctxt_id != uinfo->spu_subctxt_id)
1457                                continue;
1458                        /* Verify the sharing process matches the master */
1459                        if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt ||
1460                            rcd->userversion != uinfo->spu_userversion ||
1461                            rcd->cnt >= rcd->subctxt_cnt) {
1462                                ret = -EINVAL;
1463                                goto done;
1464                        }
1465                        ctxt_fp(fp) = rcd;
1466                        subctxt_fp(fp) = rcd->cnt++;
1467                        rcd->subpid[subctxt_fp(fp)] = current->pid;
1468                        tidcursor_fp(fp) = 0;
1469                        rcd->active_slaves |= 1 << subctxt_fp(fp);
1470                        ret = 1;
1471                        goto done;
1472                }
1473        }
1474
1475done:
1476        return ret;
1477}
1478
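    /*
     * Userspace obtains a context in three steps: open() a device node,
     * issue QIB_CMD_ASSIGN_CTXT via write() (handled by qib_assign_ctxt()
     * below), then issue QIB_CMD_USER_INIT (handled by qib_do_user_init()).
     */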
1479static int qib_open(struct inode *in, struct file *fp)
1480{
1481        /* The real work is performed later in qib_assign_ctxt() */
1482        fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL);
1483        if (fp->private_data) /* no cpu affinity by default */
1484                ((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1;
1485        return fp->private_data ? 0 : -ENOMEM;
1486}
1487
1488/*
1489 * Get the ctxt early, so we can set affinity prior to memory allocation.
1490 */
1491static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
1492{
1493        int ret;
1494        int i_minor;
1495        unsigned swmajor, swminor, alg = QIB_PORT_ALG_ACROSS;
1496
1497        /* Check to be sure we haven't already initialized this file */
1498        if (ctxt_fp(fp)) {
1499                ret = -EINVAL;
1500                goto done;
1501        }
1502
1503        /* for now, if major version is different, bail */
1504        swmajor = uinfo->spu_userversion >> 16;
1505        if (swmajor != QIB_USER_SWMAJOR) {
1506                ret = -ENODEV;
1507                goto done;
1508        }
1509
1510        swminor = uinfo->spu_userversion & 0xffff;
1511
1512        if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT)
1513                alg = uinfo->spu_port_alg;
1514
1515        mutex_lock(&qib_mutex);
1516
1517        if (qib_compatible_subctxts(swmajor, swminor) &&
1518            uinfo->spu_subctxt_cnt) {
1519                ret = find_shared_ctxt(fp, uinfo);
1520                if (ret) {
1521                        if (ret > 0)
1522                                ret = 0;
1523                        goto done_chk_sdma;
1524                }
1525        }
1526
1527        i_minor = iminor(fp->f_dentry->d_inode) - QIB_USER_MINOR_BASE;
1528        if (i_minor)
1529                ret = find_free_ctxt(i_minor - 1, fp, uinfo);
1530        else
1531                ret = get_a_ctxt(fp, uinfo, alg);
1532
1533done_chk_sdma:
1534        if (!ret) {
1535                struct qib_filedata *fd = fp->private_data;
1536                const struct qib_ctxtdata *rcd = fd->rcd;
1537                const struct qib_devdata *dd = rcd->dd;
1538                unsigned int weight;
1539
1540                if (dd->flags & QIB_HAS_SEND_DMA) {
1541                        fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
1542                                                            dd->unit,
1543                                                            rcd->ctxt,
1544                                                            fd->subctxt);
1545                        if (!fd->pq)
1546                                ret = -ENOMEM;
1547                }
1548
1549                /*
1550                 * If the process has NOT already set its affinity, select and
1551                 * reserve a processor for it, as a rendezvous for all
1552                 * users of the driver.  If they don't actually later
1553                 * set affinity to this cpu, or set it to some other cpu,
1554                 * it just means that sooner or later we stop recommending
1555                 * a cpu, and let the scheduler do its best.
1556                 */
1557                weight = cpumask_weight(tsk_cpus_allowed(current));
1558                if (!ret && weight >= qib_cpulist_count) {
1559                        int cpu;
1560                        cpu = find_first_zero_bit(qib_cpulist,
1561                                                  qib_cpulist_count);
1562                        if (cpu != qib_cpulist_count) {
1563                                __set_bit(cpu, qib_cpulist);
1564                                fd->rec_cpu_num = cpu;
1565                        }
1566                } else if (weight == 1 &&
1567                        test_bit(cpumask_first(tsk_cpus_allowed(current)),
1568                                 qib_cpulist))
1569                        qib_devinfo(dd->pcidev,
1570                                "%s PID %u affinity set to cpu %d; already allocated\n",
1571                                current->comm, current->pid,
1572                                cpumask_first(tsk_cpus_allowed(current)));
1573        }
1574
1575        mutex_unlock(&qib_mutex);
1576
1577done:
1578        return ret;
1579}
1580
1581
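    /*
     * Second stage of context setup (QIB_CMD_USER_INIT): carve out this
     * context's share of the 2KB PIO send buffers, allocate the receive
     * header queue and eager buffers, and enable the context for receive.
     * Slaves of a shared context only wait here for the master to finish.
     */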
1582static int qib_do_user_init(struct file *fp,
1583                            const struct qib_user_info *uinfo)
1584{
1585        int ret;
1586        struct qib_ctxtdata *rcd = ctxt_fp(fp);
1587        struct qib_devdata *dd;
1588        unsigned uctxt;
1589
1590        /* Subctxts don't need to initialize anything since the master did it. */
1591        if (subctxt_fp(fp)) {
1592                ret = wait_event_interruptible(rcd->wait,
1593                        !test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));
1594                goto bail;
1595        }
1596
1597        dd = rcd->dd;
1598
1599        /* Some ctxts may get extra buffers; calculate that here. */
1600        uctxt = rcd->ctxt - dd->first_user_ctxt;
1601        if (uctxt < dd->ctxts_extrabuf) {
1602                rcd->piocnt = dd->pbufsctxt + 1;
1603                rcd->pio_base = rcd->piocnt * uctxt;
1604        } else {
1605                rcd->piocnt = dd->pbufsctxt;
1606                rcd->pio_base = rcd->piocnt * uctxt +
1607                        dd->ctxts_extrabuf;
1608        }
1609
1610        /*
1611         * All user buffers are 2KB buffers.  If we ever support
1612         * giving 4KB buffers to user processes, this will need some
1613         * work.  Can't use piobufbase directly, because it has
1614         * both 2K and 4K buffer base values.  So check and handle.
1615         */
1616        if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
1617                if (rcd->pio_base >= dd->piobcnt2k) {
1618                        qib_dev_err(dd,
1619                                    "%u:ctxt%u: no 2KB buffers available\n",
1620                                    dd->unit, rcd->ctxt);
1621                        ret = -ENOBUFS;
1622                        goto bail;
1623                }
1624                rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
1625                qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",
1626                            rcd->ctxt, rcd->piocnt);
1627        }
1628
1629        rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
1630        qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
1631                               TXCHK_CHG_TYPE_USER, rcd);
1632        /*
1633         * Try to ensure that processes start up with a consistent avail
1634         * update for their own buffer range, at least.  If the system is
1635         * very quiet, the in-memory copy may be out of date at startup for
1636         * this range of buffers when a context gets re-used.  Do this after
1637         * the chg_pioavail and before the rest of setup, so it's "almost
1638         * certain" the DMA will have occurred (we can't guarantee it 100%,
1639         * but with this ordering, and given how much else happens after
1640         * this, it should be many nines of probability).
1641         */
1642        dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
1643
1644        /*
1645         * Now allocate the rcvhdr Q and eager TIDs; skip the TID
1646         * array for the time being.  If rcd->ctxt exceeds what the chip
1647         * supports, we will someday need extra handling here to route
1648         * the overflow through ctxt 0.
1649         */
1650        ret = qib_create_rcvhdrq(dd, rcd);
1651        if (!ret)
1652                ret = qib_setup_eagerbufs(rcd);
1653        if (ret)
1654                goto bail_pio;
1655
1656        rcd->tidcursor = 0; /* start at beginning after open */
1657
1658        /* initialize poll variables... */
1659        rcd->urgent = 0;
1660        rcd->urgent_poll = 0;
1661
1662        /*
1663         * Now enable the ctxt for receive.
1664         * Some chips DMA the tail register to memory when it changes
1665         * (and when the update bit transitions from 0 to 1); for those
1666         * chips, we turn the feature off and then back on.  This will
1667         * (very briefly) affect any other open ctxts, but the duration
1668         * is very short, so it isn't an issue.  We explicitly set the
1669         * in-memory tail copy to 0 beforehand, so we don't have to wait
1670         * to be sure the DMA update has happened (the chip resets
1671         * head/tail to 0 on the transition to enable).
1672         */
1673        if (rcd->rcvhdrtail_kvaddr)
1674                qib_clear_rcvhdrtail(rcd);
1675
1676        dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
1677                      rcd->ctxt);
1678
1679        /* Notify any waiting slaves */
1680        if (rcd->subctxt_cnt) {
1681                clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
1682                wake_up(&rcd->wait);
1683        }
1684        return 0;
1685
1686bail_pio:
1687        qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
1688                               TXCHK_CHG_TYPE_KERN, rcd);
1689bail:
1690        return ret;
1691}
1692
1693/**
1694 * unlock_expected_tids - unlock any expected TID entries the context still had in use
1695 * @rcd: ctxt
1696 *
1697 * We don't actually update the chip here, because we do a bulk update
1698 * below, using f_clear_tids.
1699 */
1700static void unlock_expected_tids(struct qib_ctxtdata *rcd)
1701{
1702        struct qib_devdata *dd = rcd->dd;
1703        int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
1704        int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt;
1705
1706        for (i = ctxt_tidbase; i < maxtid; i++) {
1707                struct page *p = dd->pageshadow[i];
1708                dma_addr_t phys;
1709
1710                if (!p)
1711                        continue;
1712
1713                phys = dd->physshadow[i];
1714                dd->physshadow[i] = dd->tidinvalid;
1715                dd->pageshadow[i] = NULL;
1716                pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
1717                               PCI_DMA_FROMDEVICE);
1718                qib_release_user_pages(&p, 1);
1719                cnt++;
1720        }
1721}
1722
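    /*
     * Release a context: drain and destroy this file's user SDMA queue
     * and release its reserved CPU.  A slave close then just clears its
     * active bit; the last closer also disables receive for the context,
     * returns its PIO buffers to the kernel, clears its TIDs and
     * partition keys, and frees the context data outside qib_mutex.
     */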
1723static int qib_close(struct inode *in, struct file *fp)
1724{
1725        int ret = 0;
1726        struct qib_filedata *fd;
1727        struct qib_ctxtdata *rcd;
1728        struct qib_devdata *dd;
1729        unsigned long flags;
1730        unsigned ctxt;
1731        pid_t pid;
1732
1733        mutex_lock(&qib_mutex);
1734
1735        fd = fp->private_data;
1736        fp->private_data = NULL;
1737        rcd = fd->rcd;
1738        if (!rcd) {
1739                mutex_unlock(&qib_mutex);
1740                goto bail;
1741        }
1742
1743        dd = rcd->dd;
1744
1745        /* ensure all pio buffer writes in progress are flushed */
1746        qib_flush_wc();
1747
1748        /* drain user sdma queue */
1749        if (fd->pq) {
1750                qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
1751                qib_user_sdma_queue_destroy(fd->pq);
1752        }
1753
1754        if (fd->rec_cpu_num != -1)
1755                __clear_bit(fd->rec_cpu_num, qib_cpulist);
1756
1757        if (--rcd->cnt) {
1758                /*
1759                 * XXX If the master closes the context before the slave(s),
1760                 * revoke the mmap for the eager receive queue so
1761                 * the slave(s) don't wait for receive data forever.
1762                 */
1763                rcd->active_slaves &= ~(1 << fd->subctxt);
1764                rcd->subpid[fd->subctxt] = 0;
1765                mutex_unlock(&qib_mutex);
1766                goto bail;
1767        }
1768
1769        /* early; no interrupt users after this */
1770        spin_lock_irqsave(&dd->uctxt_lock, flags);
1771        ctxt = rcd->ctxt;
1772        dd->rcd[ctxt] = NULL;
1773        pid = rcd->pid;
1774        rcd->pid = 0;
1775        spin_unlock_irqrestore(&dd->uctxt_lock, flags);
1776
1777        if (rcd->rcvwait_to || rcd->piowait_to ||
1778            rcd->rcvnowait || rcd->pionowait) {
1779                rcd->rcvwait_to = 0;
1780                rcd->piowait_to = 0;
1781                rcd->rcvnowait = 0;
1782                rcd->pionowait = 0;
1783        }
1784        if (rcd->flag)
1785                rcd->flag = 0;
1786
1787        if (dd->kregbase) {
1788                /* atomically clear receive enable ctxt and intr avail. */
1789                dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |
1790                                  QIB_RCVCTRL_INTRAVAIL_DIS, ctxt);
1791
1792                /* clean up the pkeys for this ctxt user */
1793                qib_clean_part_key(rcd, dd);
1794                qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
1795                qib_chg_pioavailkernel(dd, rcd->pio_base,
1796                                       rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL);
1797
1798                dd->f_clear_tids(dd, rcd);
1799
1800                if (dd->pageshadow)
1801                        unlock_expected_tids(rcd);
1802                qib_stats.sps_ctxts--;
1803                dd->freectxts++;
1804        }
1805
1806        mutex_unlock(&qib_mutex);
1807        qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */
1808
1809bail:
1810        kfree(fd);
1811        return ret;
1812}
1813
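    /*
     * QIB_CMD_CTXT_INFO: report unit, port, context and subcontext numbers,
     * the context counts, and the recommended CPU back to the user process.
     */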
1814static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
1815{
1816        struct qib_ctxt_info info;
1817        int ret;
1818        size_t sz;
1819        struct qib_ctxtdata *rcd = ctxt_fp(fp);
1820        struct qib_filedata *fd;
1821
1822        fd = fp->private_data;
1823
1824        info.num_active = qib_count_active_units();
1825        info.unit = rcd->dd->unit;
1826        info.port = rcd->ppd->port;
1827        info.ctxt = rcd->ctxt;
1828        info.subctxt =  subctxt_fp(fp);
1829        /* Number of user ctxts available for this device. */
1830        info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
1831        info.num_subctxts = rcd->subctxt_cnt;
1832        info.rec_cpu = fd->rec_cpu_num;
1833        sz = sizeof(info);
1834
1835        if (copy_to_user(uinfo, &info, sz)) {
1836                ret = -EFAULT;
1837                goto bail;
1838        }
1839        ret = 0;
1840
1841bail:
1842        return ret;
1843}
1844
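    /* QIB_CMD_SDMA_INFLIGHT: report the SDMA in-flight counter to userspace. */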
1845static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
1846                                 u32 __user *inflightp)
1847{
1848        const u32 val = qib_user_sdma_inflight_counter(pq);
1849
1850        if (put_user(val, inflightp))
1851                return -EFAULT;
1852
1853        return 0;
1854}
1855
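    /*
     * QIB_CMD_SDMA_COMPLETE: advance the SDMA queue if possible, then
     * report the completion counter to userspace.
     */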
1856static int qib_sdma_get_complete(struct qib_pportdata *ppd,
1857                                 struct qib_user_sdma_queue *pq,
1858                                 u32 __user *completep)
1859{
1860        u32 val;
1861        int err;
1862
1863        if (!pq)
1864                return -EINVAL;
1865
1866        err = qib_user_sdma_make_progress(ppd, pq);
1867        if (err < 0)
1868                return err;
1869
1870        val = qib_user_sdma_complete_counter(pq);
1871        if (put_user(val, completep))
1872                return -EFAULT;
1873
1874        return 0;
1875}
1876
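    /*
     * Called after disarming send buffers: if the link is not usable,
     * flag this context (and its subcontexts) to retry the disarm, and
     * stall the caller for up to 30 seconds rather than have it spin,
     * returning -ENETDOWN.
     */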
1877static int disarm_req_delay(struct qib_ctxtdata *rcd)
1878{
1879        int ret = 0;
1880
1881        if (!usable(rcd->ppd)) {
1882                int i;
1883                /*
1884                 * If the link is down, or otherwise not usable, delay
1885                 * the caller up to 30 seconds so we don't thrash trying
1886                 * to get the chip back to ACTIVE, and set a flag so the
1887                 * caller makes the call again.
1888                 */
1889                if (rcd->user_event_mask) {
1890                        /*
1891                         * subctxt_cnt is 0 if not shared, so do base
1892                         * separately, first, then remaining subctxt, if any
1893                         */
1894                        set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
1895                                &rcd->user_event_mask[0]);
1896                        for (i = 1; i < rcd->subctxt_cnt; i++)
1897                                set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
1898                                        &rcd->user_event_mask[i]);
1899                }
1900                for (i = 0; !usable(rcd->ppd) && i < 300; i++)
1901                        msleep(100);
1902                ret = -ENETDOWN;
1903        }
1904        return ret;
1905}
1906
1907/*
1908 * Find all user contexts in use, and set the specified bit in their
1909 * event mask.
1910 * See also find_ctxt() for a similar use that is specific to send buffers.
1911 */
1912int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
1913{
1914        struct qib_ctxtdata *rcd;
1915        unsigned ctxt;
1916        int ret = 0;
1917        unsigned long flags;
1918
1919        spin_lock_irqsave(&ppd->dd->uctxt_lock, flags);
1920        for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
1921             ctxt++) {
1922                rcd = ppd->dd->rcd[ctxt];
1923                if (!rcd)
1924                        continue;
1925                if (rcd->user_event_mask) {
1926                        int i;
1927                        /*
1928                         * subctxt_cnt is 0 if not shared, so do base
1929                         * separately, first, then remaining subctxt, if any
1930                         */
1931                        set_bit(evtbit, &rcd->user_event_mask[0]);
1932                        for (i = 1; i < rcd->subctxt_cnt; i++)
1933                                set_bit(evtbit, &rcd->user_event_mask[i]);
1934                }
1935                ret = 1;
1936                break;
1937        }
1938        spin_unlock_irqrestore(&ppd->dd->uctxt_lock, flags);
1939
1940        return ret;
1941}
1942
1943/*
1944 * Clear the event notifier events for this context.
1945 * For the DISARM_BUFS case, we also take action (this obsoletes
1946 * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards
1947 * compatibility).
1948 * Other bits don't currently require actions; just atomically clear them.
1949 * The user process then performs whatever actions are appropriate to the
1950 * bit having been set, if desired, and checks again in the future.
1951 */
1952static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
1953                              unsigned long events)
1954{
1955        int ret = 0, i;
1956
1957        for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) {
1958                if (!test_bit(i, &events))
1959                        continue;
1960                if (i == _QIB_EVENT_DISARM_BUFS_BIT) {
1961                        (void)qib_disarm_piobufs_ifneeded(rcd);
1962                        ret = disarm_req_delay(rcd);
1963                } else
1964                        clear_bit(i, &rcd->user_event_mask[subctxt]);
1965        }
1966        return ret;
1967}
1968
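    /*
     * write() is the command interface: userspace passes a struct qib_cmd
     * whose type selects the operation and whose union member carries the
     * arguments; on success the number of bytes consumed is returned.
     * An illustrative (hypothetical) userspace call, with "info" standing
     * in for a caller-filled struct qib_user_info, might be:
     *
     *	struct qib_cmd cmd = { .type = QIB_CMD_ASSIGN_CTXT };
     *	cmd.cmd.user_info = info;
     *	write(fd, &cmd, sizeof(cmd));
     */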
1969static ssize_t qib_write(struct file *fp, const char __user *data,
1970                         size_t count, loff_t *off)
1971{
1972        const struct qib_cmd __user *ucmd;
1973        struct qib_ctxtdata *rcd;
1974        const void __user *src;
1975        size_t consumed, copy = 0;
1976        struct qib_cmd cmd;
1977        ssize_t ret = 0;
1978        void *dest;
1979
1980        if (count < sizeof(cmd.type)) {
1981                ret = -EINVAL;
1982                goto bail;
1983        }
1984
1985        ucmd = (const struct qib_cmd __user *) data;
1986
1987        if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
1988                ret = -EFAULT;
1989                goto bail;
1990        }
1991
1992        consumed = sizeof(cmd.type);
1993
1994        switch (cmd.type) {
1995        case QIB_CMD_ASSIGN_CTXT:
1996        case QIB_CMD_USER_INIT:
1997                copy = sizeof(cmd.cmd.user_info);
1998                dest = &cmd.cmd.user_info;
1999                src = &ucmd->cmd.user_info;
2000                break;
2001
2002        case QIB_CMD_RECV_CTRL:
2003                copy = sizeof(cmd.cmd.recv_ctrl);
2004                dest = &cmd.cmd.recv_ctrl;
2005                src = &ucmd->cmd.recv_ctrl;
2006                break;
2007
2008        case QIB_CMD_CTXT_INFO:
2009                copy = sizeof(cmd.cmd.ctxt_info);
2010                dest = &cmd.cmd.ctxt_info;
2011                src = &ucmd->cmd.ctxt_info;
2012                break;
2013
2014        case QIB_CMD_TID_UPDATE:
2015        case QIB_CMD_TID_FREE:
2016                copy = sizeof(cmd.cmd.tid_info);
2017                dest = &cmd.cmd.tid_info;
2018                src = &ucmd->cmd.tid_info;
2019                break;
2020
2021        case QIB_CMD_SET_PART_KEY:
2022                copy = sizeof(cmd.cmd.part_key);
2023                dest = &cmd.cmd.part_key;
2024                src = &ucmd->cmd.part_key;
2025                break;
2026
2027        case QIB_CMD_DISARM_BUFS:
2028        case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
2029                copy = 0;
2030                src = NULL;
2031                dest = NULL;
2032                break;
2033
2034        case QIB_CMD_POLL_TYPE:
2035                copy = sizeof(cmd.cmd.poll_type);
2036                dest = &cmd.cmd.poll_type;
2037                src = &ucmd->cmd.poll_type;
2038                break;
2039
2040        case QIB_CMD_ARMLAUNCH_CTRL:
2041                copy = sizeof(cmd.cmd.armlaunch_ctrl);
2042                dest = &cmd.cmd.armlaunch_ctrl;
2043                src = &ucmd->cmd.armlaunch_ctrl;
2044                break;
2045
2046        case QIB_CMD_SDMA_INFLIGHT:
2047                copy = sizeof(cmd.cmd.sdma_inflight);
2048                dest = &cmd.cmd.sdma_inflight;
2049                src = &ucmd->cmd.sdma_inflight;
2050                break;
2051
2052        case QIB_CMD_SDMA_COMPLETE:
2053                copy = sizeof(cmd.cmd.sdma_complete);
2054                dest = &cmd.cmd.sdma_complete;
2055                src = &ucmd->cmd.sdma_complete;
2056                break;
2057
2058        case QIB_CMD_ACK_EVENT:
2059                copy = sizeof(cmd.cmd.event_mask);
2060                dest = &cmd.cmd.event_mask;
2061                src = &ucmd->cmd.event_mask;
2062                break;
2063
2064        default:
2065                ret = -EINVAL;
2066                goto bail;
2067        }
2068
2069        if (copy) {
2070                if ((count - consumed) < copy) {
2071                        ret = -EINVAL;
2072                        goto bail;
2073                }
2074                if (copy_from_user(dest, src, copy)) {
2075                        ret = -EFAULT;
2076                        goto bail;
2077                }
2078                consumed += copy;
2079        }
2080
2081        rcd = ctxt_fp(fp);
2082        if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) {
2083                ret = -EINVAL;
2084                goto bail;
2085        }
2086
2087        switch (cmd.type) {
2088        case QIB_CMD_ASSIGN_CTXT:
2089                ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
2090                if (ret)
2091                        goto bail;
2092                break;
2093
2094        case QIB_CMD_USER_INIT:
2095                ret = qib_do_user_init(fp, &cmd.cmd.user_info);
2096                if (ret)
2097                        goto bail;
2098                ret = qib_get_base_info(fp, (void __user *) (unsigned long)
2099                                        cmd.cmd.user_info.spu_base_info,
2100                                        cmd.cmd.user_info.spu_base_info_size);
2101                break;
2102
2103        case QIB_CMD_RECV_CTRL:
2104                ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl);
2105                break;
2106
2107        case QIB_CMD_CTXT_INFO:
2108                ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *)
2109                                    (unsigned long) cmd.cmd.ctxt_info);
2110                break;
2111
2112        case QIB_CMD_TID_UPDATE:
2113                ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info);
2114                break;
2115
2116        case QIB_CMD_TID_FREE:
2117                ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info);
2118                break;
2119
2120        case QIB_CMD_SET_PART_KEY:
2121                ret = qib_set_part_key(rcd, cmd.cmd.part_key);
2122                break;
2123
2124        case QIB_CMD_DISARM_BUFS:
2125                (void)qib_disarm_piobufs_ifneeded(rcd);
2126                ret = disarm_req_delay(rcd);
2127                break;
2128
2129        case QIB_CMD_PIOAVAILUPD:
2130                qib_force_pio_avail_update(rcd->dd);
2131                break;
2132
2133        case QIB_CMD_POLL_TYPE:
2134                rcd->poll_type = cmd.cmd.poll_type;
2135                break;
2136
2137        case QIB_CMD_ARMLAUNCH_CTRL:
2138                rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);
2139                break;
2140
2141        case QIB_CMD_SDMA_INFLIGHT:
2142                ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp),
2143                                            (u32 __user *) (unsigned long)
2144                                            cmd.cmd.sdma_inflight);
2145                break;
2146
2147        case QIB_CMD_SDMA_COMPLETE:
2148                ret = qib_sdma_get_complete(rcd->ppd,
2149                                            user_sdma_queue_fp(fp),
2150                                            (u32 __user *) (unsigned long)
2151                                            cmd.cmd.sdma_complete);
2152                break;
2153
2154        case QIB_CMD_ACK_EVENT:
2155                ret = qib_user_event_ack(rcd, subctxt_fp(fp),
2156                                         cmd.cmd.event_mask);
2157                break;
2158        }
2159
2160        if (ret >= 0)
2161                ret = consumed;
2162
2163bail:
2164        return ret;
2165}
2166
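    /*
     * Vectored (aio) writes are the user SDMA data path: the iovecs are
     * handed to this file's SDMA queue via qib_user_sdma_writev().
     */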
2167static ssize_t qib_aio_write(struct kiocb *iocb, const struct iovec *iov,
2168                             unsigned long dim, loff_t off)
2169{
2170        struct qib_filedata *fp = iocb->ki_filp->private_data;
2171        struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
2172        struct qib_user_sdma_queue *pq = fp->pq;
2173
2174        if (!dim || !pq)
2175                return -EINVAL;
2176
2177        return qib_user_sdma_writev(rcd, pq, iov, dim);
2178}
2179
2180static struct class *qib_class;
2181static dev_t qib_dev;
2182
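    /*
     * Create one character device node: allocate a cdev on the driver's
     * major number with the given minor, add it, and register a struct
     * device in the "ipath" class so the node shows up in /dev.
     */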
2183int qib_cdev_init(int minor, const char *name,
2184                  const struct file_operations *fops,
2185                  struct cdev **cdevp, struct device **devp)
2186{
2187        const dev_t dev = MKDEV(MAJOR(qib_dev), minor);
2188        struct cdev *cdev;
2189        struct device *device = NULL;
2190        int ret;
2191
2192        cdev = cdev_alloc();
2193        if (!cdev) {
2194                pr_err("Could not allocate cdev for minor %d, %s\n",
2195                       minor, name);
2196                ret = -ENOMEM;
2197                goto done;
2198        }
2199
2200        cdev->owner = THIS_MODULE;
2201        cdev->ops = fops;
2202        kobject_set_name(&cdev->kobj, name);
2203
2204        ret = cdev_add(cdev, dev, 1);
2205        if (ret < 0) {
2206                pr_err("Could not add cdev for minor %d, %s (err %d)\n",
2207                       minor, name, -ret);
2208                goto err_cdev;
2209        }
2210
2211        device = device_create(qib_class, NULL, dev, NULL, name);
2212        if (!IS_ERR(device))
2213                goto done;
2214        ret = PTR_ERR(device);
2215        device = NULL;
2216        pr_err("Could not create device for minor %d, %s (err %d)\n",
2217               minor, name, -ret);
2218err_cdev:
2219        cdev_del(cdev);
2220        cdev = NULL;
2221done:
2222        *cdevp = cdev;
2223        *devp = device;
2224        return ret;
2225}
2226
2227void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp)
2228{
2229        struct device *device = *devp;
2230
2231        if (device) {
2232                device_unregister(device);
2233                *devp = NULL;
2234        }
2235
2236        if (*cdevp) {
2237                cdev_del(*cdevp);
2238                *cdevp = NULL;
2239        }
2240}
2241
2242static struct cdev *wildcard_cdev;
2243static struct device *wildcard_device;
2244
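    /*
     * Module-wide setup: reserve a chrdev region of QIB_NMINORS minors
     * and create the "ipath" device class used by qib_cdev_init().
     */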
2245int __init qib_dev_init(void)
2246{
2247        int ret;
2248
2249        ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
2250        if (ret < 0) {
2251                pr_err("Could not allocate chrdev region (err %d)\n", -ret);
2252                goto done;
2253        }
2254
2255        qib_class = class_create(THIS_MODULE, "ipath");
2256        if (IS_ERR(qib_class)) {
2257                ret = PTR_ERR(qib_class);
2258                pr_err("Could not create device class (err %d)\n", -ret);
2259                unregister_chrdev_region(qib_dev, QIB_NMINORS);
2260        }
2261
2262done:
2263        return ret;
2264}
2265
2266void qib_dev_cleanup(void)
2267{
2268        if (qib_class) {
2269                class_destroy(qib_class);
2270                qib_class = NULL;
2271        }
2272
2273        unregister_chrdev_region(qib_dev, QIB_NMINORS);
2274}
2275
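    /*
     * Each unit gets an "ipath%d" node at minor unit + 1; all units share
     * a wildcard "ipath" node at minor 0.  user_count tracks how many
     * units still reference the wildcard, so it is created on the first
     * qib_user_add() and torn down on the last qib_user_remove().
     */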
2276static atomic_t user_count = ATOMIC_INIT(0);
2277
2278static void qib_user_remove(struct qib_devdata *dd)
2279{
2280        if (atomic_dec_return(&user_count) == 0)
2281                qib_cdev_cleanup(&wildcard_cdev, &wildcard_device);
2282
2283        qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
2284}
2285
2286static int qib_user_add(struct qib_devdata *dd)
2287{
2288        char name[10];
2289        int ret;
2290
2291        if (atomic_inc_return(&user_count) == 1) {
2292                ret = qib_cdev_init(0, "ipath", &qib_file_ops,
2293                                    &wildcard_cdev, &wildcard_device);
2294                if (ret)
2295                        goto done;
2296        }
2297
2298        snprintf(name, sizeof(name), "ipath%d", dd->unit);
2299        ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
2300                            &dd->user_cdev, &dd->user_device);
2301        if (ret)
2302                qib_user_remove(dd);
2303done:
2304        return ret;
2305}
2306
2307/*
2308 * Create per-unit files in /dev
2309 */
2310int qib_device_create(struct qib_devdata *dd)
2311{
2312        int r, ret;
2313
2314        r = qib_user_add(dd);
2315        ret = qib_diag_add(dd);
2316        if (r && !ret)
2317                ret = r;
2318        return ret;
2319}
2320
2321/*
2322 * Remove per-unit files in /dev
2323 * Returns void; the core kernel returns no errors for this stuff.
2324 */
2325void qib_device_remove(struct qib_devdata *dd)
2326{
2327        qib_user_remove(dd);
2328        qib_diag_remove(dd);
2329}
2330