linux/drivers/infiniband/hw/qib/qib_file_ops.c
   1/*
   2 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
   3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
   4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
   5 *
   6 * This software is available to you under a choice of one of two
   7 * licenses.  You may choose to be licensed under the terms of the GNU
   8 * General Public License (GPL) Version 2, available from the file
   9 * COPYING in the main directory of this source tree, or the
  10 * OpenIB.org BSD license below:
  11 *
  12 *     Redistribution and use in source and binary forms, with or
  13 *     without modification, are permitted provided that the following
  14 *     conditions are met:
  15 *
  16 *      - Redistributions of source code must retain the above
  17 *        copyright notice, this list of conditions and the following
  18 *        disclaimer.
  19 *
  20 *      - Redistributions in binary form must reproduce the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer in the documentation and/or other materials
  23 *        provided with the distribution.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32 * SOFTWARE.
  33 */
  34
  35#include <linux/pci.h>
  36#include <linux/poll.h>
  37#include <linux/cdev.h>
  38#include <linux/swap.h>
  39#include <linux/vmalloc.h>
  40#include <linux/highmem.h>
  41#include <linux/io.h>
  42#include <linux/aio.h>
  43#include <linux/jiffies.h>
  44#include <asm/pgtable.h>
  45#include <linux/delay.h>
  46#include <linux/export.h>
  47
  48#include "qib.h"
  49#include "qib_common.h"
  50#include "qib_user_sdma.h"
  51
  52#undef pr_fmt
  53#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
  54
  55static int qib_open(struct inode *, struct file *);
  56static int qib_close(struct inode *, struct file *);
  57static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
  58static ssize_t qib_aio_write(struct kiocb *, const struct iovec *,
  59                             unsigned long, loff_t);
  60static unsigned int qib_poll(struct file *, struct poll_table_struct *);
  61static int qib_mmapf(struct file *, struct vm_area_struct *);
  62
  63static const struct file_operations qib_file_ops = {
  64        .owner = THIS_MODULE,
  65        .write = qib_write,
  66        .aio_write = qib_aio_write,
  67        .open = qib_open,
  68        .release = qib_close,
  69        .poll = qib_poll,
  70        .mmap = qib_mmapf,
  71        .llseek = noop_llseek,
  72};
  73
  74/*
  75 * Convert kernel virtual addresses to physical addresses so they don't
  76 * potentially conflict with the chip addresses used as mmap offsets.
  77 * It doesn't really matter what mmap offset we use as long as we can
  78 * interpret it correctly.
  79 */
  80static u64 cvt_kvaddr(void *p)
  81{
  82        struct page *page;
  83        u64 paddr = 0;
  84
  85        page = vmalloc_to_page(p);
  86        if (page)
  87                paddr = page_to_pfn(page) << PAGE_SHIFT;
  88
  89        return paddr;
  90}
  91
  92static int qib_get_base_info(struct file *fp, void __user *ubase,
  93                             size_t ubase_size)
  94{
  95        struct qib_ctxtdata *rcd = ctxt_fp(fp);
  96        int ret = 0;
  97        struct qib_base_info *kinfo = NULL;
  98        struct qib_devdata *dd = rcd->dd;
  99        struct qib_pportdata *ppd = rcd->ppd;
 100        unsigned subctxt_cnt;
 101        int shared, master;
 102        size_t sz;
 103
 104        subctxt_cnt = rcd->subctxt_cnt;
 105        if (!subctxt_cnt) {
 106                shared = 0;
 107                master = 0;
 108                subctxt_cnt = 1;
 109        } else {
 110                shared = 1;
 111                master = !subctxt_fp(fp);
 112        }
 113
 114        sz = sizeof(*kinfo);
 115        /* If context sharing is not requested, allow the old size structure */
 116        if (!shared)
 117                sz -= 7 * sizeof(u64);
 118        if (ubase_size < sz) {
 119                ret = -EINVAL;
 120                goto bail;
 121        }
 122
 123        kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
 124        if (kinfo == NULL) {
 125                ret = -ENOMEM;
 126                goto bail;
 127        }
 128
 129        ret = dd->f_get_base_info(rcd, kinfo);
 130        if (ret < 0)
 131                goto bail;
 132
 133        kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
 134        kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
 135        kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
 136        kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;
 137        /*
 138         * have to mmap whole thing
 139         */
 140        kinfo->spi_rcv_egrbuftotlen =
 141                rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
 142        kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
 143        kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
 144                rcd->rcvegrbuf_chunks;
 145        kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
 146        if (master)
 147                kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
 148        /*
  149         * for this use, this may be cfgctxts summed over all chips that
  150         * are configured and present
 151         */
 152        kinfo->spi_nctxts = dd->cfgctxts;
 153        /* unit (chip/board) our context is on */
 154        kinfo->spi_unit = dd->unit;
 155        kinfo->spi_port = ppd->port;
 156        /* for now, only a single page */
 157        kinfo->spi_tid_maxsize = PAGE_SIZE;
 158
 159        /*
 160         * Doing this per context, and based on the skip value, etc.  This has
 161         * to be the actual buffer size, since the protocol code treats it
 162         * as an array.
 163         *
 164         * These have to be set to user addresses in the user code via mmap.
 165         * These values are used on return to user code for the mmap target
 166         * addresses only.  For 32 bit, same 44 bit address problem, so use
 167         * the physical address, not virtual.  Before 2.6.11, using the
 168         * page_address() macro worked, but in 2.6.11, even that returns the
 169         * full 64 bit address (upper bits all 1's).  So far, using the
 170         * physical addresses (or chip offsets, for chip mapping) works, but
 171         * no doubt some future kernel release will change that, and we'll be
 172         * on to yet another method of dealing with this.
 173         * Normally only one of rcvhdr_tailaddr or rhf_offset is useful
 174         * since the chips with non-zero rhf_offset don't normally
 175         * enable tail register updates to host memory, but for testing,
 176         * both can be enabled and used.
 177         */
 178        kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys;
 179        kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys;
 180        kinfo->spi_rhf_offset = dd->rhf_offset;
 181        kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys;
 182        kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
 183        /* setup per-unit (not port) status area for user programs */
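             /*
              * ppd->statusp points into the same coherent DMA page as the
              * pioavail registers (see the pointer arithmetic below), so the
              * user-visible status address is the pioavail mmap base plus the
              * offset of statusp within that page.
              */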
 184        kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
 185                (char *) ppd->statusp -
 186                (char *) dd->pioavailregs_dma;
 187        kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
 188        if (!shared) {
 189                kinfo->spi_piocnt = rcd->piocnt;
 190                kinfo->spi_piobufbase = (u64) rcd->piobufs;
 191                kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask);
 192        } else if (master) {
 193                kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) +
 194                                    (rcd->piocnt % subctxt_cnt);
  195                /* Master's PIO buffers are after all the slaves' */
 196                kinfo->spi_piobufbase = (u64) rcd->piobufs +
 197                        dd->palign *
 198                        (rcd->piocnt - kinfo->spi_piocnt);
 199        } else {
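                     /*
                      * Slave subcontexts each get an equal share of the
                      * context's PIO buffers; slave N's share starts after the
                      * shares of the N earlier slaves (subctxt_fp() is 1-based
                      * for slaves, hence the "- 1" below), and the master's
                      * share sits after all of them.
                      */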
 200                unsigned slave = subctxt_fp(fp) - 1;
 201
 202                kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt;
 203                kinfo->spi_piobufbase = (u64) rcd->piobufs +
 204                        dd->palign * kinfo->spi_piocnt * slave;
 205        }
 206
 207        if (shared) {
 208                kinfo->spi_sendbuf_status =
 209                        cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]);
 210                /* only spi_subctxt_* fields should be set in this block! */
 211                kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase);
 212
 213                kinfo->spi_subctxt_rcvegrbuf =
 214                        cvt_kvaddr(rcd->subctxt_rcvegrbuf);
 215                kinfo->spi_subctxt_rcvhdr_base =
 216                        cvt_kvaddr(rcd->subctxt_rcvhdr_base);
 217        }
 218
 219        /*
 220         * All user buffers are 2KB buffers.  If we ever support
 221         * giving 4KB buffers to user processes, this will need some
 222         * work.  Can't use piobufbase directly, because it has
 223         * both 2K and 4K buffer base values.
 224         */
 225        kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
 226                dd->palign;
 227        kinfo->spi_pioalign = dd->palign;
 228        kinfo->spi_qpair = QIB_KD_QP;
 229        /*
 230         * user mode PIO buffers are always 2KB, even when 4KB can
 231         * be received, and sent via the kernel; this is ibmaxlen
 232         * for 2K MTU.
 233         */
 234        kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
 235        kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */
 236        kinfo->spi_ctxt = rcd->ctxt;
 237        kinfo->spi_subctxt = subctxt_fp(fp);
 238        kinfo->spi_sw_version = QIB_KERN_SWVERSION;
 239        kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */
 240        kinfo->spi_hw_version = dd->revision;
 241
 242        if (master)
 243                kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER;
 244
 245        sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
 246        if (copy_to_user(ubase, kinfo, sz))
 247                ret = -EFAULT;
 248bail:
 249        kfree(kinfo);
 250        return ret;
 251}
 252
 253/**
 254 * qib_tid_update - update a context TID
 255 * @rcd: the context
 256 * @fp: the qib device file
 257 * @ti: the TID information
 258 *
 259 * The new implementation as of Oct 2004 is that the driver assigns
 260 * the tid and returns it to the caller.   To reduce search time, we
 261 * keep a cursor for each context, walking the shadow tid array to find
 262 * one that's not in use.
 263 *
 264 * For now, if we can't allocate the full list, we fail, although
 265 * in the long run, we'll allocate as many as we can, and the
 266 * caller will deal with that by trying the remaining pages later.
 267 * That means that when we fail, we have to mark the tids as not in
 268 * use again, in our shadow copy.
 269 *
 270 * It's up to the caller to free the tids when they are done.
 271 * We'll unlock the pages as they free them.
 272 *
 273 * Also, right now we are locking one page at a time, but since
 274 * the intended use of this routine is for a single group of
 275 * virtually contiguous pages, that should change to improve
 276 * performance.
 277 */
 278static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
 279                          const struct qib_tid_info *ti)
 280{
 281        int ret = 0, ntids;
 282        u32 tid, ctxttid, cnt, i, tidcnt, tidoff;
 283        u16 *tidlist;
 284        struct qib_devdata *dd = rcd->dd;
 285        u64 physaddr;
 286        unsigned long vaddr;
 287        u64 __iomem *tidbase;
 288        unsigned long tidmap[8];
 289        struct page **pagep = NULL;
 290        unsigned subctxt = subctxt_fp(fp);
 291
 292        if (!dd->pageshadow) {
 293                ret = -ENOMEM;
 294                goto done;
 295        }
 296
 297        cnt = ti->tidcnt;
 298        if (!cnt) {
 299                ret = -EFAULT;
 300                goto done;
 301        }
 302        ctxttid = rcd->ctxt * dd->rcvtidcnt;
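             /*
              * Partition this context's TIDs among the subcontexts: the
              * master (subctxt 0) takes an equal share plus any remainder,
              * placed at the end of the range; each slave takes an equal
              * share at offset (subctxt - 1) * share from the context base.
              */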
 303        if (!rcd->subctxt_cnt) {
 304                tidcnt = dd->rcvtidcnt;
 305                tid = rcd->tidcursor;
 306                tidoff = 0;
 307        } else if (!subctxt) {
 308                tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
 309                         (dd->rcvtidcnt % rcd->subctxt_cnt);
 310                tidoff = dd->rcvtidcnt - tidcnt;
 311                ctxttid += tidoff;
 312                tid = tidcursor_fp(fp);
 313        } else {
 314                tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
 315                tidoff = tidcnt * (subctxt - 1);
 316                ctxttid += tidoff;
 317                tid = tidcursor_fp(fp);
 318        }
 319        if (cnt > tidcnt) {
 320                /* make sure it all fits in tid_pg_list */
 321                qib_devinfo(dd->pcidev,
 322                        "Process tried to allocate %u TIDs, only trying max (%u)\n",
 323                        cnt, tidcnt);
 324                cnt = tidcnt;
 325        }
 326        pagep = (struct page **) rcd->tid_pg_list;
 327        tidlist = (u16 *) &pagep[dd->rcvtidcnt];
 328        pagep += tidoff;
 329        tidlist += tidoff;
 330
 331        memset(tidmap, 0, sizeof(tidmap));
 332        /* before decrement; chip actual # */
 333        ntids = tidcnt;
 334        tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
 335                                   dd->rcvtidbase +
 336                                   ctxttid * sizeof(*tidbase));
 337
 338        /* virtual address of first page in transfer */
 339        vaddr = ti->tidvaddr;
 340        if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
 341                       cnt * PAGE_SIZE)) {
 342                ret = -EFAULT;
 343                goto done;
 344        }
 345        ret = qib_get_user_pages(vaddr, cnt, pagep);
 346        if (ret) {
 347                /*
 348                 * if (ret == -EBUSY)
 349                 * We can't continue because the pagep array won't be
 350                 * initialized. This should never happen,
 351                 * unless perhaps the user has mpin'ed the pages
 352                 * themselves.
 353                 */
 354                qib_devinfo(dd->pcidev,
  355                         "Failed to lock addr %p, %u pages: errno %d\n",
  356                         (void *) vaddr, cnt, -ret);
 357                goto done;
 358        }
 359        for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
 360                for (; ntids--; tid++) {
 361                        if (tid == tidcnt)
 362                                tid = 0;
 363                        if (!dd->pageshadow[ctxttid + tid])
 364                                break;
 365                }
 366                if (ntids < 0) {
 367                        /*
 368                         * Oops, wrapped all the way through their TIDs,
 369                         * and didn't have enough free; see comments at
 370                         * start of routine
 371                         */
 372                        i--;    /* last tidlist[i] not filled in */
 373                        ret = -ENOMEM;
 374                        break;
 375                }
 376                tidlist[i] = tid + tidoff;
 377                /* we "know" system pages and TID pages are same size */
 378                dd->pageshadow[ctxttid + tid] = pagep[i];
 379                dd->physshadow[ctxttid + tid] =
 380                        qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
 381                                     PCI_DMA_FROMDEVICE);
 382                /*
 383                 * don't need atomic or it's overhead
 384                 */
 385                __set_bit(tid, tidmap);
 386                physaddr = dd->physshadow[ctxttid + tid];
 387                /* PERFORMANCE: below should almost certainly be cached */
 388                dd->f_put_tid(dd, &tidbase[tid],
 389                                  RCVHQ_RCV_TYPE_EXPECTED, physaddr);
 390                /*
 391                 * don't check this tid in qib_ctxtshadow, since we
 392                 * just filled it in; start with the next one.
 393                 */
 394                tid++;
 395        }
 396
 397        if (ret) {
 398                u32 limit;
 399cleanup:
 400                /* jump here if copy out of updated info failed... */
 401                /* same code that's in qib_free_tid() */
 402                limit = sizeof(tidmap) * BITS_PER_BYTE;
 403                if (limit > tidcnt)
 404                        /* just in case size changes in future */
 405                        limit = tidcnt;
 406                tid = find_first_bit((const unsigned long *)tidmap, limit);
 407                for (; tid < limit; tid++) {
 408                        if (!test_bit(tid, tidmap))
 409                                continue;
 410                        if (dd->pageshadow[ctxttid + tid]) {
 411                                dma_addr_t phys;
 412
 413                                phys = dd->physshadow[ctxttid + tid];
 414                                dd->physshadow[ctxttid + tid] = dd->tidinvalid;
 415                                /* PERFORMANCE: below should almost certainly
 416                                 * be cached
 417                                 */
 418                                dd->f_put_tid(dd, &tidbase[tid],
 419                                              RCVHQ_RCV_TYPE_EXPECTED,
 420                                              dd->tidinvalid);
 421                                pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
 422                                               PCI_DMA_FROMDEVICE);
 423                                dd->pageshadow[ctxttid + tid] = NULL;
 424                        }
 425                }
 426                qib_release_user_pages(pagep, cnt);
 427        } else {
 428                /*
 429                 * Copy the updated array, with qib_tid's filled in, back
 430                 * to user.  Since we did the copy in already, this "should
  431                 * never fail".  If it does, we have to clean up...
 432                 */
 433                if (copy_to_user((void __user *)
 434                                 (unsigned long) ti->tidlist,
 435                                 tidlist, cnt * sizeof(*tidlist))) {
 436                        ret = -EFAULT;
 437                        goto cleanup;
 438                }
 439                if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
  440                                 tidmap, sizeof(tidmap))) {
 441                        ret = -EFAULT;
 442                        goto cleanup;
 443                }
 444                if (tid == tidcnt)
 445                        tid = 0;
 446                if (!rcd->subctxt_cnt)
 447                        rcd->tidcursor = tid;
 448                else
 449                        tidcursor_fp(fp) = tid;
 450        }
 451
 452done:
 453        return ret;
 454}
 455
 456/**
 457 * qib_tid_free - free a context TID
 458 * @rcd: the context
 459 * @subctxt: the subcontext
 460 * @ti: the TID info
 461 *
 462 * right now we are unlocking one page at a time, but since
 463 * the intended use of this routine is for a single group of
 464 * virtually contiguous pages, that should change to improve
 465 * performance.  We check that the TID is in range for this context
 466 * but otherwise don't check validity; if user has an error and
 467 * frees the wrong tid, it's only their own data that can thereby
  468 * be corrupted.  We do check that the TID was in use, for sanity.
 469 * We always use our idea of the saved address, not the address that
 470 * they pass in to us.
 471 */
 472static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
 473                        const struct qib_tid_info *ti)
 474{
 475        int ret = 0;
 476        u32 tid, ctxttid, cnt, limit, tidcnt;
 477        struct qib_devdata *dd = rcd->dd;
 478        u64 __iomem *tidbase;
 479        unsigned long tidmap[8];
 480
 481        if (!dd->pageshadow) {
 482                ret = -ENOMEM;
 483                goto done;
 484        }
 485
 486        if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
  487                           sizeof(tidmap))) {
 488                ret = -EFAULT;
 489                goto done;
 490        }
 491
 492        ctxttid = rcd->ctxt * dd->rcvtidcnt;
 493        if (!rcd->subctxt_cnt)
 494                tidcnt = dd->rcvtidcnt;
 495        else if (!subctxt) {
 496                tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
 497                         (dd->rcvtidcnt % rcd->subctxt_cnt);
 498                ctxttid += dd->rcvtidcnt - tidcnt;
 499        } else {
 500                tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
 501                ctxttid += tidcnt * (subctxt - 1);
 502        }
 503        tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
 504                                   dd->rcvtidbase +
 505                                   ctxttid * sizeof(*tidbase));
 506
 507        limit = sizeof(tidmap) * BITS_PER_BYTE;
 508        if (limit > tidcnt)
 509                /* just in case size changes in future */
 510                limit = tidcnt;
 511        tid = find_first_bit(tidmap, limit);
 512        for (cnt = 0; tid < limit; tid++) {
 513                /*
  514                 * A possible small optimization: if we detect a run of 3 or
  515                 * so clear bits, use find_next_bit to skip ahead.  That would
  516                 * mainly accelerate the case where we wrapped, so we have some
  517                 * set at the beginning, some at the end, and a big gap
  518                 * in the middle.
 519                 */
 520                if (!test_bit(tid, tidmap))
 521                        continue;
 522                cnt++;
 523                if (dd->pageshadow[ctxttid + tid]) {
 524                        struct page *p;
 525                        dma_addr_t phys;
 526
 527                        p = dd->pageshadow[ctxttid + tid];
 528                        dd->pageshadow[ctxttid + tid] = NULL;
 529                        phys = dd->physshadow[ctxttid + tid];
 530                        dd->physshadow[ctxttid + tid] = dd->tidinvalid;
 531                        /* PERFORMANCE: below should almost certainly be
 532                         * cached
 533                         */
 534                        dd->f_put_tid(dd, &tidbase[tid],
 535                                      RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
 536                        pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
 537                                       PCI_DMA_FROMDEVICE);
 538                        qib_release_user_pages(&p, 1);
 539                }
 540        }
 541done:
 542        return ret;
 543}
 544
 545/**
 546 * qib_set_part_key - set a partition key
 547 * @rcd: the context
 548 * @key: the key
 549 *
 550 * We can have up to 4 active at a time (other than the default, which is
 551 * always allowed).  This is somewhat tricky, since multiple contexts may set
 552 * the same key, so we reference count them, and clean up at exit.  All 4
 553 * partition keys are packed into a single qlogic_ib register.  It's an
  554 * error for a process to set the same pkey multiple times.  We provide no
  555 * mechanism to de-allocate a pkey at this time; we may eventually need to
  556 * do that.  I've used atomic operations, no locking, and only make a
  557 * single pass through what's available.  This should be more than
  558 * adequate for some time.  I'll think about spinlocks or the like if
  559 * and when it becomes necessary.
 560 */
 561static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
 562{
 563        struct qib_pportdata *ppd = rcd->ppd;
 564        int i, any = 0, pidx = -1;
 565        u16 lkey = key & 0x7FFF;
 566        int ret;
 567
 568        if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF)) {
 569                /* nothing to do; this key always valid */
 570                ret = 0;
 571                goto bail;
 572        }
 573
 574        if (!lkey) {
 575                ret = -EINVAL;
 576                goto bail;
 577        }
 578
 579        /*
 580         * Set the full membership bit, because it has to be
 581         * set in the register or the packet, and it seems
 582         * cleaner to set in the register than to force all
 583         * callers to set it.
 584         */
 585        key |= 0x8000;
 586
 587        for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
 588                if (!rcd->pkeys[i] && pidx == -1)
 589                        pidx = i;
 590                if (rcd->pkeys[i] == key) {
 591                        ret = -EEXIST;
 592                        goto bail;
 593                }
 594        }
 595        if (pidx == -1) {
 596                ret = -EBUSY;
 597                goto bail;
 598        }
 599        for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
 600                if (!ppd->pkeys[i]) {
 601                        any++;
 602                        continue;
 603                }
 604                if (ppd->pkeys[i] == key) {
 605                        atomic_t *pkrefs = &ppd->pkeyrefs[i];
 606
 607                        if (atomic_inc_return(pkrefs) > 1) {
 608                                rcd->pkeys[pidx] = key;
 609                                ret = 0;
 610                                goto bail;
 611                        } else {
 612                                /*
 613                                 * lost race, decrement count, catch below
 614                                 */
 615                                atomic_dec(pkrefs);
 616                                any++;
 617                        }
 618                }
 619                if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
 620                        /*
 621                         * It makes no sense to have both the limited and
 622                         * full membership PKEY set at the same time since
 623                         * the unlimited one will disable the limited one.
 624                         */
 625                        ret = -EEXIST;
 626                        goto bail;
 627                }
 628        }
 629        if (!any) {
 630                ret = -EBUSY;
 631                goto bail;
 632        }
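             /*
              * No context currently holds this pkey: claim the first empty
              * hardware slot.  atomic_inc_return() == 1 means we won any race
              * with another context trying to claim the same slot.
              */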
 633        for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
 634                if (!ppd->pkeys[i] &&
 635                    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
 636                        rcd->pkeys[pidx] = key;
 637                        ppd->pkeys[i] = key;
 638                        (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
 639                        ret = 0;
 640                        goto bail;
 641                }
 642        }
 643        ret = -EBUSY;
 644
 645bail:
 646        return ret;
 647}
 648
 649/**
 650 * qib_manage_rcvq - manage a context's receive queue
 651 * @rcd: the context
 652 * @subctxt: the subcontext
 653 * @start_stop: action to carry out
 654 *
 655 * start_stop == 0 disables receive on the context, for use in queue
 656 * overflow conditions.  start_stop==1 re-enables, to be used to
 657 * re-init the software copy of the head register
 658 */
 659static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,
 660                           int start_stop)
 661{
 662        struct qib_devdata *dd = rcd->dd;
 663        unsigned int rcvctrl_op;
 664
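             /*
              * Only the master (subctxt 0) controls the shared hardware
              * receive context; calls from slave subcontexts are no-ops.
              */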
 665        if (subctxt)
 666                goto bail;
  667        /* atomically set or clear receive enable for this ctxt. */
 668        if (start_stop) {
 669                /*
 670                 * On enable, force in-memory copy of the tail register to
 671                 * 0, so that protocol code doesn't have to worry about
  672                 * whether the chip has yet updated the in-memory copy on
  673                 * return from the system call.  The chip always resets its
  674                 * tail register back to 0 on a
 675                 * transition from disabled to enabled.
 676                 */
 677                if (rcd->rcvhdrtail_kvaddr)
 678                        qib_clear_rcvhdrtail(rcd);
 679                rcvctrl_op = QIB_RCVCTRL_CTXT_ENB;
 680        } else
 681                rcvctrl_op = QIB_RCVCTRL_CTXT_DIS;
 682        dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
 683        /* always; new head should be equal to new tail; see above */
 684bail:
 685        return 0;
 686}
 687
 688static void qib_clean_part_key(struct qib_ctxtdata *rcd,
 689                               struct qib_devdata *dd)
 690{
 691        int i, j, pchanged = 0;
 692        u64 oldpkey;
 693        struct qib_pportdata *ppd = rcd->ppd;
 694
 695        /* for debugging only */
 696        oldpkey = (u64) ppd->pkeys[0] |
 697                ((u64) ppd->pkeys[1] << 16) |
 698                ((u64) ppd->pkeys[2] << 32) |
 699                ((u64) ppd->pkeys[3] << 48);
 700
 701        for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
 702                if (!rcd->pkeys[i])
 703                        continue;
 704                for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) {
 705                        /* check for match independent of the global bit */
 706                        if ((ppd->pkeys[j] & 0x7fff) !=
 707                            (rcd->pkeys[i] & 0x7fff))
 708                                continue;
 709                        if (atomic_dec_and_test(&ppd->pkeyrefs[j])) {
 710                                ppd->pkeys[j] = 0;
 711                                pchanged++;
 712                        }
 713                        break;
 714                }
 715                rcd->pkeys[i] = 0;
 716        }
 717        if (pchanged)
 718                (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
 719}
 720
 721/* common code for the mappings on dma_alloc_coherent mem */
 722static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
 723                        unsigned len, void *kvaddr, u32 write_ok, char *what)
 724{
 725        struct qib_devdata *dd = rcd->dd;
 726        unsigned long pfn;
 727        int ret;
 728
 729        if ((vma->vm_end - vma->vm_start) > len) {
 730                qib_devinfo(dd->pcidev,
 731                         "FAIL on %s: len %lx > %x\n", what,
 732                         vma->vm_end - vma->vm_start, len);
 733                ret = -EFAULT;
 734                goto bail;
 735        }
 736
 737        /*
 738         * shared context user code requires rcvhdrq mapped r/w, others
 739         * only allowed readonly mapping.
 740         */
 741        if (!write_ok) {
 742                if (vma->vm_flags & VM_WRITE) {
 743                        qib_devinfo(dd->pcidev,
 744                                 "%s must be mapped readonly\n", what);
 745                        ret = -EPERM;
 746                        goto bail;
 747                }
 748
 749                /* don't allow them to later change with mprotect */
 750                vma->vm_flags &= ~VM_MAYWRITE;
 751        }
 752
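             /*
              * kvaddr is a coherent-DMA kernel address (see the comment above
              * this function), so virt_to_phys() yields the physical page to
              * hand to remap_pfn_range().
              */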
 753        pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
 754        ret = remap_pfn_range(vma, vma->vm_start, pfn,
 755                              len, vma->vm_page_prot);
 756        if (ret)
 757                qib_devinfo(dd->pcidev,
 758                        "%s ctxt%u mmap of %lx, %x bytes failed: %d\n",
 759                        what, rcd->ctxt, pfn, len, ret);
 760bail:
 761        return ret;
 762}
 763
 764static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
 765                     u64 ureg)
 766{
 767        unsigned long phys;
 768        unsigned long sz;
 769        int ret;
 770
 771        /*
 772         * This is real hardware, so use io_remap.  This is the mechanism
 773         * for the user process to update the head registers for their ctxt
 774         * in the chip.
 775         */
 776        sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
 777        if ((vma->vm_end - vma->vm_start) > sz) {
 778                qib_devinfo(dd->pcidev,
 779                        "FAIL mmap userreg: reqlen %lx > PAGE\n",
 780                        vma->vm_end - vma->vm_start);
 781                ret = -EFAULT;
 782        } else {
 783                phys = dd->physaddr + ureg;
 784                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 785
 786                vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
 787                ret = io_remap_pfn_range(vma, vma->vm_start,
 788                                         phys >> PAGE_SHIFT,
 789                                         vma->vm_end - vma->vm_start,
 790                                         vma->vm_page_prot);
 791        }
 792        return ret;
 793}
 794
 795static int mmap_piobufs(struct vm_area_struct *vma,
 796                        struct qib_devdata *dd,
 797                        struct qib_ctxtdata *rcd,
 798                        unsigned piobufs, unsigned piocnt)
 799{
 800        unsigned long phys;
 801        int ret;
 802
 803        /*
 804         * When we map the PIO buffers in the chip, we want to map them as
 805         * writeonly, no read possible; unfortunately, x86 doesn't allow
 806         * for this in hardware, but we still prevent users from asking
 807         * for it.
 808         */
 809        if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
 810                qib_devinfo(dd->pcidev,
 811                        "FAIL mmap piobufs: reqlen %lx > PAGE\n",
 812                         vma->vm_end - vma->vm_start);
 813                ret = -EINVAL;
 814                goto bail;
 815        }
 816
 817        phys = dd->physaddr + piobufs;
 818
 819#if defined(__powerpc__)
 820        /* There isn't a generic way to specify writethrough mappings */
 821        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
 822        pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
 823        pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
 824#endif
 825
 826        /*
 827         * don't allow them to later change to readable with mprotect (for when
 828         * not initially mapped readable, as is normally the case)
 829         */
 830        vma->vm_flags &= ~VM_MAYREAD;
 831        vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
 832
 833        if (qib_wc_pat)
 834                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 835
 836        ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
 837                                 vma->vm_end - vma->vm_start,
 838                                 vma->vm_page_prot);
 839bail:
 840        return ret;
 841}
 842
 843static int mmap_rcvegrbufs(struct vm_area_struct *vma,
 844                           struct qib_ctxtdata *rcd)
 845{
 846        struct qib_devdata *dd = rcd->dd;
 847        unsigned long start, size;
 848        size_t total_size, i;
 849        unsigned long pfn;
 850        int ret;
 851
 852        size = rcd->rcvegrbuf_size;
 853        total_size = rcd->rcvegrbuf_chunks * size;
 854        if ((vma->vm_end - vma->vm_start) > total_size) {
 855                qib_devinfo(dd->pcidev,
 856                        "FAIL on egr bufs: reqlen %lx > actual %lx\n",
 857                         vma->vm_end - vma->vm_start,
 858                         (unsigned long) total_size);
 859                ret = -EINVAL;
 860                goto bail;
 861        }
 862
 863        if (vma->vm_flags & VM_WRITE) {
 864                qib_devinfo(dd->pcidev,
 865                        "Can't map eager buffers as writable (flags=%lx)\n",
 866                        vma->vm_flags);
 867                ret = -EPERM;
 868                goto bail;
 869        }
 870        /* don't allow them to later change to writeable with mprotect */
 871        vma->vm_flags &= ~VM_MAYWRITE;
 872
 873        start = vma->vm_start;
 874
 875        for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) {
 876                pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
 877                ret = remap_pfn_range(vma, start, pfn, size,
 878                                      vma->vm_page_prot);
 879                if (ret < 0)
 880                        goto bail;
 881        }
 882        ret = 0;
 883
 884bail:
 885        return ret;
 886}
 887
 888/*
 889 * qib_file_vma_fault - handle a VMA page fault.
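      *
      * mmap_kvaddr() stores the kernel virtual address of the vmalloc'ed
      * subcontext memory in vm_pgoff, so vmf->pgoff << PAGE_SHIFT here is the
      * kernel address of the faulting page for vmalloc_to_page() to resolve.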
 890 */
 891static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 892{
 893        struct page *page;
 894
 895        page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
 896        if (!page)
 897                return VM_FAULT_SIGBUS;
 898
 899        get_page(page);
 900        vmf->page = page;
 901
 902        return 0;
 903}
 904
  905static const struct vm_operations_struct qib_file_vm_ops = {
 906        .fault = qib_file_vma_fault,
 907};
 908
 909static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
 910                       struct qib_ctxtdata *rcd, unsigned subctxt)
 911{
 912        struct qib_devdata *dd = rcd->dd;
 913        unsigned subctxt_cnt;
 914        unsigned long len;
 915        void *addr;
 916        size_t size;
 917        int ret = 0;
 918
 919        subctxt_cnt = rcd->subctxt_cnt;
 920        size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
 921
 922        /*
 923         * Each process has all the subctxt uregbase, rcvhdrq, and
 924         * rcvegrbufs mmapped - as an array for all the processes,
 925         * and also separately for this process.
 926         */
 927        if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) {
 928                addr = rcd->subctxt_uregbase;
 929                size = PAGE_SIZE * subctxt_cnt;
 930        } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) {
 931                addr = rcd->subctxt_rcvhdr_base;
 932                size = rcd->rcvhdrq_size * subctxt_cnt;
 933        } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) {
 934                addr = rcd->subctxt_rcvegrbuf;
 935                size *= subctxt_cnt;
 936        } else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase +
 937                                        PAGE_SIZE * subctxt)) {
 938                addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt;
 939                size = PAGE_SIZE;
 940        } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base +
 941                                        rcd->rcvhdrq_size * subctxt)) {
 942                addr = rcd->subctxt_rcvhdr_base +
 943                        rcd->rcvhdrq_size * subctxt;
 944                size = rcd->rcvhdrq_size;
 945        } else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) {
 946                addr = rcd->user_event_mask;
 947                size = PAGE_SIZE;
 948        } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf +
 949                                        size * subctxt)) {
 950                addr = rcd->subctxt_rcvegrbuf + size * subctxt;
 951                /* rcvegrbufs are read-only on the slave */
 952                if (vma->vm_flags & VM_WRITE) {
 953                        qib_devinfo(dd->pcidev,
  954                                 "Can't map eager buffers as writable (flags=%lx)\n",
  955                                 vma->vm_flags);
 956                        ret = -EPERM;
 957                        goto bail;
 958                }
 959                /*
 960                 * Don't allow permission to later change to writeable
 961                 * with mprotect.
 962                 */
 963                vma->vm_flags &= ~VM_MAYWRITE;
 964        } else
 965                goto bail;
 966        len = vma->vm_end - vma->vm_start;
 967        if (len > size) {
 968                ret = -EINVAL;
 969                goto bail;
 970        }
 971
 972        vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
 973        vma->vm_ops = &qib_file_vm_ops;
 974        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 975        ret = 1;
 976
 977bail:
 978        return ret;
 979}
 980
 981/**
 982 * qib_mmapf - mmap various structures into user space
 983 * @fp: the file pointer
 984 * @vma: the VM area
 985 *
 986 * We use this to have a shared buffer between the kernel and the user code
 987 * for the rcvhdr queue, egr buffers, and the per-context user regs and pio
 988 * buffers in the chip.  We have the open and close entries so we can bump
 989 * the ref count and keep the driver from being unloaded while still mapped.
 990 */
 991static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
 992{
 993        struct qib_ctxtdata *rcd;
 994        struct qib_devdata *dd;
 995        u64 pgaddr, ureg;
 996        unsigned piobufs, piocnt;
 997        int ret, match = 1;
 998
 999        rcd = ctxt_fp(fp);
1000        if (!rcd || !(vma->vm_flags & VM_SHARED)) {
1001                ret = -EINVAL;
1002                goto bail;
1003        }
1004        dd = rcd->dd;
1005
1006        /*
1007         * This is the qib_do_user_init() code, mapping the shared buffers
1008         * and per-context user registers into the user process. The address
1009         * referred to by vm_pgoff is the file offset passed via mmap().
1010         * For shared contexts, this is the kernel vmalloc() address of the
1011         * pages to share with the master.
1012         * For non-shared or master ctxts, this is a physical address.
1013         * We only do one mmap for each space mapped.
1014         */
1015        pgaddr = vma->vm_pgoff << PAGE_SHIFT;
1016
1017        /*
1018         * Check for 0 in case one of the allocations failed, but user
1019         * called mmap anyway.
1020         */
1021        if (!pgaddr)  {
1022                ret = -EINVAL;
1023                goto bail;
1024        }
1025
1026        /*
1027         * Physical addresses must fit in 40 bits for our hardware.
1028         * Check for kernel virtual addresses first, anything else must
1029         * match a HW or memory address.
1030         */
1031        ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));
1032        if (ret) {
1033                if (ret > 0)
1034                        ret = 0;
1035                goto bail;
1036        }
1037
1038        ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
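             /*
              * The master/slave split of the PIO buffers below mirrors the
              * one in qib_get_base_info(), so the piobufs value computed here
              * matches the spi_piobufbase offset user space hands back to
              * mmap().
              */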
1039        if (!rcd->subctxt_cnt) {
1040                /* ctxt is not shared */
1041                piocnt = rcd->piocnt;
1042                piobufs = rcd->piobufs;
1043        } else if (!subctxt_fp(fp)) {
1044                /* caller is the master */
1045                piocnt = (rcd->piocnt / rcd->subctxt_cnt) +
1046                         (rcd->piocnt % rcd->subctxt_cnt);
1047                piobufs = rcd->piobufs +
1048                        dd->palign * (rcd->piocnt - piocnt);
1049        } else {
1050                unsigned slave = subctxt_fp(fp) - 1;
1051
1052                /* caller is a slave */
1053                piocnt = rcd->piocnt / rcd->subctxt_cnt;
1054                piobufs = rcd->piobufs + dd->palign * piocnt * slave;
1055        }
1056
1057        if (pgaddr == ureg)
1058                ret = mmap_ureg(vma, dd, ureg);
1059        else if (pgaddr == piobufs)
1060                ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
1061        else if (pgaddr == dd->pioavailregs_phys)
1062                /* in-memory copy of pioavail registers */
1063                ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
1064                                   (void *) dd->pioavailregs_dma, 0,
1065                                   "pioavail registers");
1066        else if (pgaddr == rcd->rcvegr_phys)
1067                ret = mmap_rcvegrbufs(vma, rcd);
1068        else if (pgaddr == (u64) rcd->rcvhdrq_phys)
1069                /*
1070                 * The rcvhdrq itself; multiple pages, contiguous
1071                 * from an i/o perspective.  Shared contexts need
1072                 * to map r/w, so we allow writing.
1073                 */
1074                ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
1075                                   rcd->rcvhdrq, 1, "rcvhdrq");
1076        else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys)
1077                /* in-memory copy of rcvhdrq tail register */
1078                ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
1079                                   rcd->rcvhdrtail_kvaddr, 0,
1080                                   "rcvhdrq tail");
1081        else
1082                match = 0;
1083        if (!match)
1084                ret = -EINVAL;
1085
1086        vma->vm_private_data = NULL;
1087
1088        if (ret < 0)
1089                qib_devinfo(dd->pcidev,
1090                         "mmap Failure %d: off %llx len %lx\n",
1091                         -ret, (unsigned long long)pgaddr,
1092                         vma->vm_end - vma->vm_start);
1093bail:
1094        return ret;
1095}
1096
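     /*
      * Poll helpers.  When nothing is pending we set a QIB_CTXT_WAITING_*
      * flag under uctxt_lock; the receive interrupt path elsewhere in the
      * driver is expected to test these flags and wake rcd->wait when data
      * or an urgent packet arrives.
      */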
1097static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
1098                                    struct file *fp,
1099                                    struct poll_table_struct *pt)
1100{
1101        struct qib_devdata *dd = rcd->dd;
1102        unsigned pollflag;
1103
1104        poll_wait(fp, &rcd->wait, pt);
1105
1106        spin_lock_irq(&dd->uctxt_lock);
1107        if (rcd->urgent != rcd->urgent_poll) {
1108                pollflag = POLLIN | POLLRDNORM;
1109                rcd->urgent_poll = rcd->urgent;
1110        } else {
1111                pollflag = 0;
1112                set_bit(QIB_CTXT_WAITING_URG, &rcd->flag);
1113        }
1114        spin_unlock_irq(&dd->uctxt_lock);
1115
1116        return pollflag;
1117}
1118
1119static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
1120                                  struct file *fp,
1121                                  struct poll_table_struct *pt)
1122{
1123        struct qib_devdata *dd = rcd->dd;
1124        unsigned pollflag;
1125
1126        poll_wait(fp, &rcd->wait, pt);
1127
1128        spin_lock_irq(&dd->uctxt_lock);
1129        if (dd->f_hdrqempty(rcd)) {
1130                set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag);
1131                dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
1132                pollflag = 0;
1133        } else
1134                pollflag = POLLIN | POLLRDNORM;
1135        spin_unlock_irq(&dd->uctxt_lock);
1136
1137        return pollflag;
1138}
1139
1140static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
1141{
1142        struct qib_ctxtdata *rcd;
1143        unsigned pollflag;
1144
1145        rcd = ctxt_fp(fp);
1146        if (!rcd)
1147                pollflag = POLLERR;
1148        else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
1149                pollflag = qib_poll_urgent(rcd, fp, pt);
1150        else  if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
1151                pollflag = qib_poll_next(rcd, fp, pt);
1152        else /* invalid */
1153                pollflag = POLLERR;
1154
1155        return pollflag;
1156}
1157
1158static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
1159{
1160        struct qib_filedata *fd = fp->private_data;
1161        const unsigned int weight = cpumask_weight(&current->cpus_allowed);
1162        const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
1163        int local_cpu;
1164
1165        /*
 1166         * If the process has NOT already set its affinity, select and
1167         * reserve a processor for it on the local NUMA node.
1168         */
1169        if ((weight >= qib_cpulist_count) &&
1170                (cpumask_weight(local_mask) <= qib_cpulist_count)) {
1171                for_each_cpu(local_cpu, local_mask)
1172                        if (!test_and_set_bit(local_cpu, qib_cpulist)) {
1173                                fd->rec_cpu_num = local_cpu;
1174                                return;
1175                        }
1176        }
1177
1178        /*
 1179         * If the process has NOT already set its affinity, select and
 1180         * reserve a processor for it, as a rendezvous for all
 1181         * users of the driver.  If they don't actually later
 1182         * set affinity to this cpu, or set it to some other cpu,
 1183         * it just means that sooner or later we don't recommend
 1184         * a cpu, and let the scheduler do its best.
1185         */
1186        if (weight >= qib_cpulist_count) {
1187                int cpu;
1188                cpu = find_first_zero_bit(qib_cpulist,
1189                                          qib_cpulist_count);
1190                if (cpu == qib_cpulist_count)
1191                        qib_dev_err(dd,
1192                        "no cpus avail for affinity PID %u\n",
1193                        current->pid);
1194                else {
1195                        __set_bit(cpu, qib_cpulist);
1196                        fd->rec_cpu_num = cpu;
1197                }
1198        }
1199}
1200
1201/*
1202 * Check that userland and driver are compatible for subcontexts.
1203 */
1204static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
1205{
1206        /* this code is written long-hand for clarity */
1207        if (QIB_USER_SWMAJOR != user_swmajor) {
1208                /* no promise of compatibility if major mismatch */
1209                return 0;
1210        }
1211        if (QIB_USER_SWMAJOR == 1) {
1212                switch (QIB_USER_SWMINOR) {
1213                case 0:
1214                case 1:
1215                case 2:
1216                        /* no subctxt implementation so cannot be compatible */
1217                        return 0;
1218                case 3:
1219                        /* 3 is only compatible with itself */
1220                        return user_swminor == 3;
1221                default:
1222                        /* >= 4 are compatible (or are expected to be) */
1223                        return user_swminor <= QIB_USER_SWMINOR;
1224                }
1225        }
1226        /* make no promises yet for future major versions */
1227        return 0;
1228}
1229
1230static int init_subctxts(struct qib_devdata *dd,
1231                         struct qib_ctxtdata *rcd,
1232                         const struct qib_user_info *uinfo)
1233{
1234        int ret = 0;
1235        unsigned num_subctxts;
1236        size_t size;
1237
1238        /*
1239         * If the user is requesting zero subctxts,
1240         * skip the subctxt allocation.
1241         */
1242        if (uinfo->spu_subctxt_cnt <= 0)
1243                goto bail;
1244        num_subctxts = uinfo->spu_subctxt_cnt;
1245
1246        /* Check for subctxt compatibility */
1247        if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
1248                uinfo->spu_userversion & 0xffff)) {
1249                qib_devinfo(dd->pcidev,
 1250                         "Mismatched user version (%d.%d) and driver "
 1251                         "version (%d.%d) for context sharing. Ensure "
 1252                         "that driver and library are from the same "
 1253                         "release.\n",
1254                         (int) (uinfo->spu_userversion >> 16),
1255                         (int) (uinfo->spu_userversion & 0xffff),
1256                         QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
1257                goto bail;
1258        }
1259        if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) {
1260                ret = -EINVAL;
1261                goto bail;
1262        }
1263
1264        rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
1265        if (!rcd->subctxt_uregbase) {
1266                ret = -ENOMEM;
1267                goto bail;
1268        }
1269        /* Note: rcd->rcvhdrq_size isn't initialized yet. */
1270        size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
1271                     sizeof(u32), PAGE_SIZE) * num_subctxts;
1272        rcd->subctxt_rcvhdr_base = vmalloc_user(size);
1273        if (!rcd->subctxt_rcvhdr_base) {
1274                ret = -ENOMEM;
1275                goto bail_ureg;
1276        }
1277
1278        rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
1279                                              rcd->rcvegrbuf_size *
1280                                              num_subctxts);
1281        if (!rcd->subctxt_rcvegrbuf) {
1282                ret = -ENOMEM;
1283                goto bail_rhdr;
1284        }
1285
1286        rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
1287        rcd->subctxt_id = uinfo->spu_subctxt_id;
1288        rcd->active_slaves = 1;
1289        rcd->redirect_seq_cnt = 1;
1290        set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
1291        goto bail;
1292
1293bail_rhdr:
1294        vfree(rcd->subctxt_rcvhdr_base);
1295bail_ureg:
1296        vfree(rcd->subctxt_uregbase);
1297        rcd->subctxt_uregbase = NULL;
1298bail:
1299        return ret;
1300}
1301
1302static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
1303                      struct file *fp, const struct qib_user_info *uinfo)
1304{
1305        struct qib_filedata *fd = fp->private_data;
1306        struct qib_devdata *dd = ppd->dd;
1307        struct qib_ctxtdata *rcd;
1308        void *ptmp = NULL;
1309        int ret;
1310        int numa_id;
1311
1312        assign_ctxt_affinity(fp, dd);
1313
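             /*
              * Choose the NUMA node for this context's allocations: when
              * NUMA aware, use the node of the CPU reserved above (or the
              * current node if none could be reserved); otherwise fall back
              * to the device's assigned node.
              */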
1314        numa_id = qib_numa_aware ? ((fd->rec_cpu_num != -1) ?
1315                cpu_to_node(fd->rec_cpu_num) :
1316                numa_node_id()) : dd->assigned_node_id;
1317
1318        rcd = qib_create_ctxtdata(ppd, ctxt, numa_id);
1319
1320        /*
1321         * Allocate memory for use in qib_tid_update() at open to
1322         * reduce cost of expected send setup per message segment
1323         */
1324        if (rcd)
1325                ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
1326                               dd->rcvtidcnt * sizeof(struct page **),
1327                               GFP_KERNEL);
1328
1329        if (!rcd || !ptmp) {
1330                qib_dev_err(dd,
1331                        "Unable to allocate ctxtdata memory, failing open\n");
1332                ret = -ENOMEM;
1333                goto bailerr;
1334        }
1335        rcd->userversion = uinfo->spu_userversion;
1336        ret = init_subctxts(dd, rcd, uinfo);
1337        if (ret)
1338                goto bailerr;
1339        rcd->tid_pg_list = ptmp;
1340        rcd->pid = current->pid;
1341        init_waitqueue_head(&dd->rcd[ctxt]->wait);
1342        strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
1343        ctxt_fp(fp) = rcd;
1344        qib_stats.sps_ctxts++;
1345        dd->freectxts--;
1346        ret = 0;
1347        goto bail;
1348
1349bailerr:
1350        if (fd->rec_cpu_num != -1)
1351                __clear_bit(fd->rec_cpu_num, qib_cpulist);
1352
1353        dd->rcd[ctxt] = NULL;
1354        kfree(rcd);
1355        kfree(ptmp);
1356bail:
1357        return ret;
1358}
1359
1360static inline int usable(struct qib_pportdata *ppd)
1361{
1362        struct qib_devdata *dd = ppd->dd;
1363
1364        return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
1365                (ppd->lflags & QIBL_LINKACTIVE);
1366}
1367
1368/*
1369 * Select a context on the given device, either using a requested port
1370 * or the port based on the context number.
1371 */
1372static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
1373                            const struct qib_user_info *uinfo)
1374{
1375        struct qib_pportdata *ppd = NULL;
1376        int ret, ctxt;
1377
1378        if (port) {
1379                if (!usable(dd->pport + port - 1)) {
1380                        ret = -ENETDOWN;
1381                        goto done;
1382                } else
1383                        ppd = dd->pport + port - 1;
1384        }
1385        for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt];
1386             ctxt++)
1387                ;
1388        if (ctxt == dd->cfgctxts) {
1389                ret = -EBUSY;
1390                goto done;
1391        }
1392        if (!ppd) {
1393                u32 pidx = ctxt % dd->num_pports;
1394                if (usable(dd->pport + pidx))
1395                        ppd = dd->pport + pidx;
1396                else {
1397                        for (pidx = 0; pidx < dd->num_pports && !ppd;
1398                             pidx++)
1399                                if (usable(dd->pport + pidx))
1400                                        ppd = dd->pport + pidx;
1401                }
1402        }
1403        ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN;
1404done:
1405        return ret;
1406}
1407
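    /*
     * Claim a free context on a specific unit, first validating any
     * explicitly requested port against the unit's port count.
     */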
1408static int find_free_ctxt(int unit, struct file *fp,
1409                          const struct qib_user_info *uinfo)
1410{
1411        struct qib_devdata *dd = qib_lookup(unit);
1412        int ret;
1413
1414        if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports))
1415                ret = -ENODEV;
1416        else
1417                ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo);
1418
1419        return ret;
1420}
1421
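    /*
     * No unit was specified, so pick one.  With QIB_PORT_ALG_ACROSS we
     * spread load by choosing the device, among those with a usable port
     * and a free context, with the fewest user contexts already in use;
     * otherwise we take the first device that can supply a context.
     */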
1422static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
1423                      unsigned alg)
1424{
1425        struct qib_devdata *udd = NULL;
1426        int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i;
1427        u32 port = uinfo->spu_port, ctxt;
1428
1429        devmax = qib_count_units(&npresent, &nup);
1430        if (!npresent) {
1431                ret = -ENXIO;
1432                goto done;
1433        }
1434        if (nup == 0) {
1435                ret = -ENETDOWN;
1436                goto done;
1437        }
1438
1439        if (alg == QIB_PORT_ALG_ACROSS) {
1440                unsigned inuse = ~0U;
1441                /* find device (with ACTIVE ports) with fewest ctxts in use */
1442                for (ndev = 0; ndev < devmax; ndev++) {
1443                        struct qib_devdata *dd = qib_lookup(ndev);
1444                        unsigned cused = 0, cfree = 0, pusable = 0;
1445                        if (!dd)
1446                                continue;
1447                        if (port && port <= dd->num_pports &&
1448                            usable(dd->pport + port - 1))
1449                                pusable = 1;
1450                        else
1451                                for (i = 0; i < dd->num_pports; i++)
1452                                        if (usable(dd->pport + i))
1453                                                pusable++;
1454                        if (!pusable)
1455                                continue;
1456                        for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
1457                             ctxt++)
1458                                if (dd->rcd[ctxt])
1459                                        cused++;
1460                                else
1461                                        cfree++;
1462                        if (pusable && cfree && cused < inuse) {
1463                                udd = dd;
1464                                inuse = cused;
1465                        }
1466                }
1467                if (udd) {
1468                        ret = choose_port_ctxt(fp, udd, port, uinfo);
1469                        goto done;
1470                }
1471        } else {
1472                for (ndev = 0; ndev < devmax; ndev++) {
1473                        struct qib_devdata *dd = qib_lookup(ndev);
1474                        if (dd) {
1475                                ret = choose_port_ctxt(fp, dd, port, uinfo);
1476                                if (!ret)
1477                                        goto done;
1478                                if (ret == -EBUSY)
1479                                        dusable++;
1480                        }
1481                }
1482        }
1483        ret = dusable ? -EBUSY : -ENETDOWN;
1484
1485done:
1486        return ret;
1487}
1488
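    /*
     * Look for an already-open master context whose subctxt_id matches
     * the caller's request and attach to it as a slave.  The subcontext
     * count and user version must also match, and a slave slot must
     * still be free.  Returns 1 on success, 0 if no candidate exists,
     * or -EINVAL for a mismatched sharing request.
     */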
1489static int find_shared_ctxt(struct file *fp,
1490                            const struct qib_user_info *uinfo)
1491{
1492        int devmax, ndev, i;
1493        int ret = 0;
1494
1495        devmax = qib_count_units(NULL, NULL);
1496
1497        for (ndev = 0; ndev < devmax; ndev++) {
1498                struct qib_devdata *dd = qib_lookup(ndev);
1499
1500                /* device portion of usable() */
1501                if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
1502                        continue;
1503                for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
1504                        struct qib_ctxtdata *rcd = dd->rcd[i];
1505
1506                        /* Skip ctxts which are not yet open */
1507                        if (!rcd || !rcd->cnt)
1508                                continue;
1509                        /* Skip ctxt if it doesn't match the requested one */
1510                        if (rcd->subctxt_id != uinfo->spu_subctxt_id)
1511                                continue;
1512                        /* Verify the sharing process matches the master */
1513                        if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt ||
1514                            rcd->userversion != uinfo->spu_userversion ||
1515                            rcd->cnt >= rcd->subctxt_cnt) {
1516                                ret = -EINVAL;
1517                                goto done;
1518                        }
1519                        ctxt_fp(fp) = rcd;
1520                        subctxt_fp(fp) = rcd->cnt++;
1521                        rcd->subpid[subctxt_fp(fp)] = current->pid;
1522                        tidcursor_fp(fp) = 0;
1523                        rcd->active_slaves |= 1 << subctxt_fp(fp);
1524                        ret = 1;
1525                        goto done;
1526                }
1527        }
1528
1529done:
1530        return ret;
1531}
1532
1533static int qib_open(struct inode *in, struct file *fp)
1534{
1535        /* The real work is performed later in qib_assign_ctxt() */
1536        fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL);
1537        if (fp->private_data) /* no cpu affinity by default */
1538                ((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1;
1539        return fp->private_data ? 0 : -ENOMEM;
1540}
1541
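    /*
     * Find a unit whose PCI bus is on the same NUMA node as the given
     * CPU, so a process pinned to one CPU can be steered to local
     * hardware.  *unit is left at -1 if no such device is found.
     */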
1542static int find_hca(unsigned int cpu, int *unit)
1543{
1544        int ret = 0, devmax, npresent, nup, ndev;
1545
1546        *unit = -1;
1547
1548        devmax = qib_count_units(&npresent, &nup);
1549        if (!npresent) {
1550                ret = -ENXIO;
1551                goto done;
1552        }
1553        if (!nup) {
1554                ret = -ENETDOWN;
1555                goto done;
1556        }
1557        for (ndev = 0; ndev < devmax; ndev++) {
1558                struct qib_devdata *dd = qib_lookup(ndev);
1559                if (dd) {
1560                        if (pcibus_to_node(dd->pcidev->bus) < 0) {
1561                                ret = -EINVAL;
1562                                goto done;
1563                        }
1564                        if (cpu_to_node(cpu) ==
1565                                pcibus_to_node(dd->pcidev->bus)) {
1566                                *unit = ndev;
1567                                goto done;
1568                        }
1569                }
1570        }
1571done:
1572        return ret;
1573}
1574
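    /*
     * Create the per-file user SDMA queue, but only when the device
     * supports send DMA; devices without it skip the allocation.
     */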
1575static int do_qib_user_sdma_queue_create(struct file *fp)
1576{
1577        struct qib_filedata *fd = fp->private_data;
1578        struct qib_ctxtdata *rcd = fd->rcd;
1579        struct qib_devdata *dd = rcd->dd;
1580
1581        if (dd->flags & QIB_HAS_SEND_DMA) {
1582                fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
1583                                                    dd->unit,
1584                                                    rcd->ctxt,
1585                                                    fd->subctxt);
1586                if (!fd->pq)
1587                        return -ENOMEM;
1588        }
1589
1590        return 0;
1591}
1592
1593/*
1594 * Get ctxt early, so we can set affinity prior to memory allocation.
1595 */
1596static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
1597{
1598        int ret;
1599        int i_minor;
1600        unsigned swmajor, swminor, alg = QIB_PORT_ALG_ACROSS;
1601
1602        /* Check to be sure we haven't already initialized this file */
1603        if (ctxt_fp(fp)) {
1604                ret = -EINVAL;
1605                goto done;
1606        }
1607
1608        /* for now, if major version is different, bail */
1609        swmajor = uinfo->spu_userversion >> 16;
1610        if (swmajor != QIB_USER_SWMAJOR) {
1611                ret = -ENODEV;
1612                goto done;
1613        }
1614
1615        swminor = uinfo->spu_userversion & 0xffff;
1616
1617        if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT)
1618                alg = uinfo->spu_port_alg;
1619
1620        mutex_lock(&qib_mutex);
1621
1622        if (qib_compatible_subctxts(swmajor, swminor) &&
1623            uinfo->spu_subctxt_cnt) {
1624                ret = find_shared_ctxt(fp, uinfo);
1625                if (ret > 0) {
1626                        ret = do_qib_user_sdma_queue_create(fp);
1627                        if (!ret)
1628                                assign_ctxt_affinity(fp, (ctxt_fp(fp))->dd);
1629                        goto done_ok;
1630                }
1631        }
1632
1633        i_minor = iminor(file_inode(fp)) - QIB_USER_MINOR_BASE;
1634        if (i_minor)
1635                ret = find_free_ctxt(i_minor - 1, fp, uinfo);
1636        else {
1637                int unit;
1638                const unsigned int cpu = cpumask_first(&current->cpus_allowed);
1639                const unsigned int weight =
1640                        cpumask_weight(&current->cpus_allowed);
1641
1642                if (weight == 1 && !test_bit(cpu, qib_cpulist))
1643                        if (!find_hca(cpu, &unit) && unit >= 0)
1644                                if (!find_free_ctxt(unit, fp, uinfo)) {
1645                                        ret = 0;
1646                                        goto done_chk_sdma;
1647                                }
1648                ret = get_a_ctxt(fp, uinfo, alg);
1649        }
1650
1651done_chk_sdma:
1652        if (!ret)
1653                ret = do_qib_user_sdma_queue_create(fp);
1654done_ok:
1655        mutex_unlock(&qib_mutex);
1656
1657done:
1658        return ret;
1659}
1660
1661
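    /*
     * Second stage of open, run after a context has been assigned:
     * carve out this context's share of the 2KB PIO send buffers,
     * allocate the receive header queue and eager buffers, and enable
     * the context for receive.  Slaves of a shared context only wait
     * here for the master to finish this work.
     */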
1662static int qib_do_user_init(struct file *fp,
1663                            const struct qib_user_info *uinfo)
1664{
1665        int ret;
1666        struct qib_ctxtdata *rcd = ctxt_fp(fp);
1667        struct qib_devdata *dd;
1668        unsigned uctxt;
1669
1670        /* Subctxts don't need to initialize anything since master did it. */
1671        if (subctxt_fp(fp)) {
1672                ret = wait_event_interruptible(rcd->wait,
1673                        !test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));
1674                goto bail;
1675        }
1676
1677        dd = rcd->dd;
1678
1679        /* some ctxts may get extra buffers, calculate that here */
1680        uctxt = rcd->ctxt - dd->first_user_ctxt;
1681        if (uctxt < dd->ctxts_extrabuf) {
1682                rcd->piocnt = dd->pbufsctxt + 1;
1683                rcd->pio_base = rcd->piocnt * uctxt;
1684        } else {
1685                rcd->piocnt = dd->pbufsctxt;
1686                rcd->pio_base = rcd->piocnt * uctxt +
1687                        dd->ctxts_extrabuf;
1688        }
1689
1690        /*
1691         * All user buffers are 2KB buffers.  If we ever support
1692         * giving 4KB buffers to user processes, this will need some
1693         * work.  We can't use piobufbase directly, because it holds
1694         * both the 2K and 4K buffer base values, so check and clamp here.
1695         */
1696        if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
1697                if (rcd->pio_base >= dd->piobcnt2k) {
1698                        qib_dev_err(dd,
1699                                    "%u:ctxt%u: no 2KB buffers available\n",
1700                                    dd->unit, rcd->ctxt);
1701                        ret = -ENOBUFS;
1702                        goto bail;
1703                }
1704                rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
1705                qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",
1706                            rcd->ctxt, rcd->piocnt);
1707        }
1708
1709        rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
1710        qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
1711                               TXCHK_CHG_TYPE_USER, rcd);
1712        /*
1713         * Try to ensure that processes start up with a consistent view of
1714         * the buffer-available state, at least for their own range.  If the
1715         * system is very quiet, the in-memory copy can be out of date at
1716         * startup for this range of buffers when a context gets re-used.
1717         * Do this after the chg_pioavail call and before the rest of setup,
1718         * so it is "almost certain" the DMA update will have occurred
1719         * (it can't be guaranteed, but given how much else happens after
1720         * this point the odds are overwhelmingly good).
1721         */
1722        dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
1723
1724        /*
1725         * Now allocate the rcvhdr Q and eager TIDs; skip the TID
1726         * array for the time being.  If rcd->ctxt exceeds what the
1727         * chip supports, we will someday need extra handling here to
1728         * overflow through ctxt 0.
1729         */
1730        ret = qib_create_rcvhdrq(dd, rcd);
1731        if (!ret)
1732                ret = qib_setup_eagerbufs(rcd);
1733        if (ret)
1734                goto bail_pio;
1735
1736        rcd->tidcursor = 0; /* start at beginning after open */
1737
1738        /* initialize poll variables... */
1739        rcd->urgent = 0;
1740        rcd->urgent_poll = 0;
1741
1742        /*
1743         * Now enable the ctxt for receive.
1744         * Some chips DMA the tail register to memory when it changes
1745         * (and when the update bit transitions from 0 to 1), so for
1746         * those chips we turn it off and then back on.
1747         * This will (very briefly) affect any other open ctxts, but the
1748         * duration is very short, and therefore isn't an issue.  We
1749         * explicitly set the in-memory tail copy to 0 beforehand, so we
1750         * don't have to wait to be sure the DMA update has happened
1751         * (the chip resets head/tail to 0 on the transition to enable).
1752         */
1753        if (rcd->rcvhdrtail_kvaddr)
1754                qib_clear_rcvhdrtail(rcd);
1755
1756        dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
1757                      rcd->ctxt);
1758
1759        /* Notify any waiting slaves */
1760        if (rcd->subctxt_cnt) {
1761                clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
1762                wake_up(&rcd->wait);
1763        }
1764        return 0;
1765
1766bail_pio:
1767        qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
1768                               TXCHK_CHG_TYPE_KERN, rcd);
1769bail:
1770        return ret;
1771}
1772
1773/**
1774 * unlock_expected_tids - unlock any expected TID entries context still had in use
1775 * @rcd: ctxt
1776 *
1777 * We don't actually update the chip here, because we do a bulk update
1778 * below, using f_clear_tids.
1779 */
1780static void unlock_expected_tids(struct qib_ctxtdata *rcd)
1781{
1782        struct qib_devdata *dd = rcd->dd;
1783        int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
1784        int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt;
1785
1786        for (i = ctxt_tidbase; i < maxtid; i++) {
1787                struct page *p = dd->pageshadow[i];
1788                dma_addr_t phys;
1789
1790                if (!p)
1791                        continue;
1792
1793                phys = dd->physshadow[i];
1794                dd->physshadow[i] = dd->tidinvalid;
1795                dd->pageshadow[i] = NULL;
1796                pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
1797                               PCI_DMA_FROMDEVICE);
1798                qib_release_user_pages(&p, 1);
1799                cnt++;
1800        }
1801}
1802
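    /*
     * Release a context on close: drain and destroy the user SDMA queue,
     * detach if we were only a slave of a shared context, and on the last
     * reference disable receive, disarm and reclaim the PIO buffers,
     * release any expected-TID pages still pinned, and free the ctxtdata.
     */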
1803static int qib_close(struct inode *in, struct file *fp)
1804{
1805        int ret = 0;
1806        struct qib_filedata *fd;
1807        struct qib_ctxtdata *rcd;
1808        struct qib_devdata *dd;
1809        unsigned long flags;
1810        unsigned ctxt;
1811        pid_t pid;
1812
1813        mutex_lock(&qib_mutex);
1814
1815        fd = fp->private_data;
1816        fp->private_data = NULL;
1817        rcd = fd->rcd;
1818        if (!rcd) {
1819                mutex_unlock(&qib_mutex);
1820                goto bail;
1821        }
1822
1823        dd = rcd->dd;
1824
1825        /* ensure all pio buffer writes in progress are flushed */
1826        qib_flush_wc();
1827
1828        /* drain user sdma queue */
1829        if (fd->pq) {
1830                qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
1831                qib_user_sdma_queue_destroy(fd->pq);
1832        }
1833
1834        if (fd->rec_cpu_num != -1)
1835                __clear_bit(fd->rec_cpu_num, qib_cpulist);
1836
1837        if (--rcd->cnt) {
1838                /*
1839                 * XXX If the master closes the context before the slave(s),
1840                 * revoke the mmap for the eager receive queue so
1841                 * the slave(s) don't wait for receive data forever.
1842                 */
1843                rcd->active_slaves &= ~(1 << fd->subctxt);
1844                rcd->subpid[fd->subctxt] = 0;
1845                mutex_unlock(&qib_mutex);
1846                goto bail;
1847        }
1848
1849        /* early; no interrupt users after this */
1850        spin_lock_irqsave(&dd->uctxt_lock, flags);
1851        ctxt = rcd->ctxt;
1852        dd->rcd[ctxt] = NULL;
1853        pid = rcd->pid;
1854        rcd->pid = 0;
1855        spin_unlock_irqrestore(&dd->uctxt_lock, flags);
1856
1857        if (rcd->rcvwait_to || rcd->piowait_to ||
1858            rcd->rcvnowait || rcd->pionowait) {
1859                rcd->rcvwait_to = 0;
1860                rcd->piowait_to = 0;
1861                rcd->rcvnowait = 0;
1862                rcd->pionowait = 0;
1863        }
1864        if (rcd->flag)
1865                rcd->flag = 0;
1866
1867        if (dd->kregbase) {
1868                /* atomically clear the ctxt's receive-enable and intr-avail bits */
1869                dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |
1870                                  QIB_RCVCTRL_INTRAVAIL_DIS, ctxt);
1871
1872                /* clean up the pkeys for this ctxt user */
1873                qib_clean_part_key(rcd, dd);
1874                qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
1875                qib_chg_pioavailkernel(dd, rcd->pio_base,
1876                                       rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL);
1877
1878                dd->f_clear_tids(dd, rcd);
1879
1880                if (dd->pageshadow)
1881                        unlock_expected_tids(rcd);
1882                qib_stats.sps_ctxts--;
1883                dd->freectxts++;
1884        }
1885
1886        mutex_unlock(&qib_mutex);
1887        qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */
1888
1889bail:
1890        kfree(fd);
1891        return ret;
1892}
1893
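    /*
     * Copy a snapshot of this context's identification (unit, port,
     * ctxt/subctxt numbers, context counts, pinned CPU) out to the
     * user's qib_ctxt_info buffer.
     */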
1894static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
1895{
1896        struct qib_ctxt_info info;
1897        int ret;
1898        size_t sz;
1899        struct qib_ctxtdata *rcd = ctxt_fp(fp);
1900        struct qib_filedata *fd;
1901
1902        fd = fp->private_data;
1903
1904        info.num_active = qib_count_active_units();
1905        info.unit = rcd->dd->unit;
1906        info.port = rcd->ppd->port;
1907        info.ctxt = rcd->ctxt;
1908        info.subctxt =  subctxt_fp(fp);
1909        /* Number of user ctxts available for this device. */
1910        info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
1911        info.num_subctxts = rcd->subctxt_cnt;
1912        info.rec_cpu = fd->rec_cpu_num;
1913        sz = sizeof(info);
1914
1915        if (copy_to_user(uinfo, &info, sz)) {
1916                ret = -EFAULT;
1917                goto bail;
1918        }
1919        ret = 0;
1920
1921bail:
1922        return ret;
1923}
1924
1925static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
1926                                 u32 __user *inflightp)
1927{
1928        const u32 val = qib_user_sdma_inflight_counter(pq);
1929
1930        if (put_user(val, inflightp))
1931                return -EFAULT;
1932
1933        return 0;
1934}
1935
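    /*
     * Push the user SDMA engine forward if possible, then report the
     * completion counter back to userspace.
     */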
1936static int qib_sdma_get_complete(struct qib_pportdata *ppd,
1937                                 struct qib_user_sdma_queue *pq,
1938                                 u32 __user *completep)
1939{
1940        u32 val;
1941        int err;
1942
1943        if (!pq)
1944                return -EINVAL;
1945
1946        err = qib_user_sdma_make_progress(ppd, pq);
1947        if (err < 0)
1948                return err;
1949
1950        val = qib_user_sdma_complete_counter(pq);
1951        if (put_user(val, completep))
1952                return -EFAULT;
1953
1954        return 0;
1955}
1956
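    /*
     * If the port is not usable, flag every (sub)context so userspace
     * re-issues the disarm request, and delay the caller for up to 30
     * seconds waiting for the link to recover before returning
     * -ENETDOWN; returns 0 immediately when the port is usable.
     */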
1957static int disarm_req_delay(struct qib_ctxtdata *rcd)
1958{
1959        int ret = 0;
1960
1961        if (!usable(rcd->ppd)) {
1962                int i;
1963                /*
1964                 * If the link is down, or otherwise not usable, delay
1965                 * the caller up to 30 seconds so we don't thrash
1966                 * trying to get the chip back to ACTIVE, and set a
1967                 * flag so the caller makes the call again.
1968                 */
1969                if (rcd->user_event_mask) {
1970                        /*
1971                         * subctxt_cnt is 0 if not shared, so do base
1972                         * separately, first, then remaining subctxt, if any
1973                         */
1974                        set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
1975                                &rcd->user_event_mask[0]);
1976                        for (i = 1; i < rcd->subctxt_cnt; i++)
1977                                set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
1978                                        &rcd->user_event_mask[i]);
1979                }
1980                for (i = 0; !usable(rcd->ppd) && i < 300; i++)
1981                        msleep(100);
1982                ret = -ENETDOWN;
1983        }
1984        return ret;
1985}
1986
1987/*
1988 * Find all user contexts in use, and set the specified bit in their
1989 * event mask.
1990 * See also find_ctxt() for a similar use, that is specific to send buffers.
1991 */
1992int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
1993{
1994        struct qib_ctxtdata *rcd;
1995        unsigned ctxt;
1996        int ret = 0;
1997        unsigned long flags;
1998
1999        spin_lock_irqsave(&ppd->dd->uctxt_lock, flags);
2000        for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
2001             ctxt++) {
2002                rcd = ppd->dd->rcd[ctxt];
2003                if (!rcd)
2004                        continue;
2005                if (rcd->user_event_mask) {
2006                        int i;
2007                        /*
2008                         * subctxt_cnt is 0 if not shared, so do base
2009                         * separately, first, then remaining subctxt, if any
2010                         */
2011                        set_bit(evtbit, &rcd->user_event_mask[0]);
2012                        for (i = 1; i < rcd->subctxt_cnt; i++)
2013                                set_bit(evtbit, &rcd->user_event_mask[i]);
2014                }
2015                ret = 1;
2016                break;
2017        }
2018        spin_unlock_irqrestore(&ppd->dd->uctxt_lock, flags);
2019
2020        return ret;
2021}
2022
2023/*
2024 * Clear the event notifier events for this context.
2025 * For the DISARM_BUFS case, we also take action (this obsoletes
2026 * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards
2027 * compatibility).
2028 * Other bits don't currently require action; they are just atomically
2029 * cleared.  The user process then acts on the bit having been set,
2030 * if desired, and checks again in the future.
2031 */
2032static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
2033                              unsigned long events)
2034{
2035        int ret = 0, i;
2036
2037        for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) {
2038                if (!test_bit(i, &events))
2039                        continue;
2040                if (i == _QIB_EVENT_DISARM_BUFS_BIT) {
2041                        (void)qib_disarm_piobufs_ifneeded(rcd);
2042                        ret = disarm_req_delay(rcd);
2043                } else
2044                        clear_bit(i, &rcd->user_event_mask[subctxt]);
2045        }
2046        return ret;
2047}
2048
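    /*
     * qib_write() is the command channel for the device rather than a
     * data path: userspace writes a struct qib_cmd, we copy in the
     * fixed-size type field, then the type-specific payload, and
     * dispatch to the matching handler.  Success returns the number of
     * bytes consumed.
     *
     * Illustrative userspace sketch (not from this tree; "fd" is an open
     * descriptor on one of the ipath device nodes and "minor_version" is
     * a placeholder for the minor ABI version the application targets):
     *
     *      struct qib_cmd cmd = { .type = QIB_CMD_ASSIGN_CTXT };
     *
     *      cmd.cmd.user_info.spu_userversion =
     *              (QIB_USER_SWMAJOR << 16) | minor_version;
     *      if (write(fd, &cmd, sizeof(cmd)) < 0)
     *              perror("QIB_CMD_ASSIGN_CTXT");
     */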
2049static ssize_t qib_write(struct file *fp, const char __user *data,
2050                         size_t count, loff_t *off)
2051{
2052        const struct qib_cmd __user *ucmd;
2053        struct qib_ctxtdata *rcd;
2054        const void __user *src;
2055        size_t consumed, copy = 0;
2056        struct qib_cmd cmd;
2057        ssize_t ret = 0;
2058        void *dest;
2059
2060        if (count < sizeof(cmd.type)) {
2061                ret = -EINVAL;
2062                goto bail;
2063        }
2064
2065        ucmd = (const struct qib_cmd __user *) data;
2066
2067        if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
2068                ret = -EFAULT;
2069                goto bail;
2070        }
2071
2072        consumed = sizeof(cmd.type);
2073
2074        switch (cmd.type) {
2075        case QIB_CMD_ASSIGN_CTXT:
2076        case QIB_CMD_USER_INIT:
2077                copy = sizeof(cmd.cmd.user_info);
2078                dest = &cmd.cmd.user_info;
2079                src = &ucmd->cmd.user_info;
2080                break;
2081
2082        case QIB_CMD_RECV_CTRL:
2083                copy = sizeof(cmd.cmd.recv_ctrl);
2084                dest = &cmd.cmd.recv_ctrl;
2085                src = &ucmd->cmd.recv_ctrl;
2086                break;
2087
2088        case QIB_CMD_CTXT_INFO:
2089                copy = sizeof(cmd.cmd.ctxt_info);
2090                dest = &cmd.cmd.ctxt_info;
2091                src = &ucmd->cmd.ctxt_info;
2092                break;
2093
2094        case QIB_CMD_TID_UPDATE:
2095        case QIB_CMD_TID_FREE:
2096                copy = sizeof(cmd.cmd.tid_info);
2097                dest = &cmd.cmd.tid_info;
2098                src = &ucmd->cmd.tid_info;
2099                break;
2100
2101        case QIB_CMD_SET_PART_KEY:
2102                copy = sizeof(cmd.cmd.part_key);
2103                dest = &cmd.cmd.part_key;
2104                src = &ucmd->cmd.part_key;
2105                break;
2106
2107        case QIB_CMD_DISARM_BUFS:
2108        case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
2109                copy = 0;
2110                src = NULL;
2111                dest = NULL;
2112                break;
2113
2114        case QIB_CMD_POLL_TYPE:
2115                copy = sizeof(cmd.cmd.poll_type);
2116                dest = &cmd.cmd.poll_type;
2117                src = &ucmd->cmd.poll_type;
2118                break;
2119
2120        case QIB_CMD_ARMLAUNCH_CTRL:
2121                copy = sizeof(cmd.cmd.armlaunch_ctrl);
2122                dest = &cmd.cmd.armlaunch_ctrl;
2123                src = &ucmd->cmd.armlaunch_ctrl;
2124                break;
2125
2126        case QIB_CMD_SDMA_INFLIGHT:
2127                copy = sizeof(cmd.cmd.sdma_inflight);
2128                dest = &cmd.cmd.sdma_inflight;
2129                src = &ucmd->cmd.sdma_inflight;
2130                break;
2131
2132        case QIB_CMD_SDMA_COMPLETE:
2133                copy = sizeof(cmd.cmd.sdma_complete);
2134                dest = &cmd.cmd.sdma_complete;
2135                src = &ucmd->cmd.sdma_complete;
2136                break;
2137
2138        case QIB_CMD_ACK_EVENT:
2139                copy = sizeof(cmd.cmd.event_mask);
2140                dest = &cmd.cmd.event_mask;
2141                src = &ucmd->cmd.event_mask;
2142                break;
2143
2144        default:
2145                ret = -EINVAL;
2146                goto bail;
2147        }
2148
2149        if (copy) {
2150                if ((count - consumed) < copy) {
2151                        ret = -EINVAL;
2152                        goto bail;
2153                }
2154                if (copy_from_user(dest, src, copy)) {
2155                        ret = -EFAULT;
2156                        goto bail;
2157                }
2158                consumed += copy;
2159        }
2160
2161        rcd = ctxt_fp(fp);
2162        if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) {
2163                ret = -EINVAL;
2164                goto bail;
2165        }
2166
2167        switch (cmd.type) {
2168        case QIB_CMD_ASSIGN_CTXT:
2169                ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
2170                if (ret)
2171                        goto bail;
2172                break;
2173
2174        case QIB_CMD_USER_INIT:
2175                ret = qib_do_user_init(fp, &cmd.cmd.user_info);
2176                if (ret)
2177                        goto bail;
2178                ret = qib_get_base_info(fp, (void __user *) (unsigned long)
2179                                        cmd.cmd.user_info.spu_base_info,
2180                                        cmd.cmd.user_info.spu_base_info_size);
2181                break;
2182
2183        case QIB_CMD_RECV_CTRL:
2184                ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl);
2185                break;
2186
2187        case QIB_CMD_CTXT_INFO:
2188                ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *)
2189                                    (unsigned long) cmd.cmd.ctxt_info);
2190                break;
2191
2192        case QIB_CMD_TID_UPDATE:
2193                ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info);
2194                break;
2195
2196        case QIB_CMD_TID_FREE:
2197                ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info);
2198                break;
2199
2200        case QIB_CMD_SET_PART_KEY:
2201                ret = qib_set_part_key(rcd, cmd.cmd.part_key);
2202                break;
2203
2204        case QIB_CMD_DISARM_BUFS:
2205                (void)qib_disarm_piobufs_ifneeded(rcd);
2206                ret = disarm_req_delay(rcd);
2207                break;
2208
2209        case QIB_CMD_PIOAVAILUPD:
2210                qib_force_pio_avail_update(rcd->dd);
2211                break;
2212
2213        case QIB_CMD_POLL_TYPE:
2214                rcd->poll_type = cmd.cmd.poll_type;
2215                break;
2216
2217        case QIB_CMD_ARMLAUNCH_CTRL:
2218                rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);
2219                break;
2220
2221        case QIB_CMD_SDMA_INFLIGHT:
2222                ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp),
2223                                            (u32 __user *) (unsigned long)
2224                                            cmd.cmd.sdma_inflight);
2225                break;
2226
2227        case QIB_CMD_SDMA_COMPLETE:
2228                ret = qib_sdma_get_complete(rcd->ppd,
2229                                            user_sdma_queue_fp(fp),
2230                                            (u32 __user *) (unsigned long)
2231                                            cmd.cmd.sdma_complete);
2232                break;
2233
2234        case QIB_CMD_ACK_EVENT:
2235                ret = qib_user_event_ack(rcd, subctxt_fp(fp),
2236                                         cmd.cmd.event_mask);
2237                break;
2238        }
2239
2240        if (ret >= 0)
2241                ret = consumed;
2242
2243bail:
2244        return ret;
2245}
2246
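    /*
     * Vectored aio writes are the SDMA data path: the iovec array
     * describes user SDMA requests and is handed to
     * qib_user_sdma_writev() on this file's queue.
     */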
2247static ssize_t qib_aio_write(struct kiocb *iocb, const struct iovec *iov,
2248                             unsigned long dim, loff_t off)
2249{
2250        struct qib_filedata *fp = iocb->ki_filp->private_data;
2251        struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
2252        struct qib_user_sdma_queue *pq = fp->pq;
2253
2254        if (!dim || !pq)
2255                return -EINVAL;
2256
2257        return qib_user_sdma_writev(rcd, pq, iov, dim);
2258}
2259
2260static struct class *qib_class;
2261static dev_t qib_dev;
2262
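    /*
     * Allocate and register a character device for the given minor, plus
     * a matching device node under the qib class.  On any failure both
     * *cdevp and *devp are left NULL, so qib_cdev_cleanup() stays safe
     * to call.
     */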
2263int qib_cdev_init(int minor, const char *name,
2264                  const struct file_operations *fops,
2265                  struct cdev **cdevp, struct device **devp)
2266{
2267        const dev_t dev = MKDEV(MAJOR(qib_dev), minor);
2268        struct cdev *cdev;
2269        struct device *device = NULL;
2270        int ret;
2271
2272        cdev = cdev_alloc();
2273        if (!cdev) {
2274                pr_err("Could not allocate cdev for minor %d, %s\n",
2275                       minor, name);
2276                ret = -ENOMEM;
2277                goto done;
2278        }
2279
2280        cdev->owner = THIS_MODULE;
2281        cdev->ops = fops;
2282        kobject_set_name(&cdev->kobj, name);
2283
2284        ret = cdev_add(cdev, dev, 1);
2285        if (ret < 0) {
2286                pr_err("Could not add cdev for minor %d, %s (err %d)\n",
2287                       minor, name, -ret);
2288                goto err_cdev;
2289        }
2290
2291        device = device_create(qib_class, NULL, dev, NULL, "%s", name);
2292        if (!IS_ERR(device))
2293                goto done;
2294        ret = PTR_ERR(device);
2295        device = NULL;
2296        pr_err("Could not create device for minor %d, %s (err %d)\n",
2297               minor, name, -ret);
2298err_cdev:
2299        cdev_del(cdev);
2300        cdev = NULL;
2301done:
2302        *cdevp = cdev;
2303        *devp = device;
2304        return ret;
2305}
2306
2307void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp)
2308{
2309        struct device *device = *devp;
2310
2311        if (device) {
2312                device_unregister(device);
2313                *devp = NULL;
2314        }
2315
2316        if (*cdevp) {
2317                cdev_del(*cdevp);
2318                *cdevp = NULL;
2319        }
2320}
2321
2322static struct cdev *wildcard_cdev;
2323static struct device *wildcard_device;
2324
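    /*
     * Module-wide character-device setup: reserve the driver's minor
     * range and create the device class (still named "ipath", matching
     * the per-unit "ipath%d" node names created below).
     */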
2325int __init qib_dev_init(void)
2326{
2327        int ret;
2328
2329        ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
2330        if (ret < 0) {
2331                pr_err("Could not allocate chrdev region (err %d)\n", -ret);
2332                goto done;
2333        }
2334
2335        qib_class = class_create(THIS_MODULE, "ipath");
2336        if (IS_ERR(qib_class)) {
2337                ret = PTR_ERR(qib_class);
2338                pr_err("Could not create device class (err %d)\n", -ret);
2339                unregister_chrdev_region(qib_dev, QIB_NMINORS);
2340        }
2341
2342done:
2343        return ret;
2344}
2345
2346void qib_dev_cleanup(void)
2347{
2348        if (qib_class) {
2349                class_destroy(qib_class);
2350                qib_class = NULL;
2351        }
2352
2353        unregister_chrdev_region(qib_dev, QIB_NMINORS);
2354}
2355
2356static atomic_t user_count = ATOMIC_INIT(0);
2357
2358static void qib_user_remove(struct qib_devdata *dd)
2359{
2360        if (atomic_dec_return(&user_count) == 0)
2361                qib_cdev_cleanup(&wildcard_cdev, &wildcard_device);
2362
2363        qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
2364}
2365
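    /*
     * Register the user character devices: the first caller also creates
     * the wildcard "ipath" node, and each unit gets its own "ipath<unit>"
     * node at minor = unit + 1.
     */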
2366static int qib_user_add(struct qib_devdata *dd)
2367{
2368        char name[10];
2369        int ret;
2370
2371        if (atomic_inc_return(&user_count) == 1) {
2372                ret = qib_cdev_init(0, "ipath", &qib_file_ops,
2373                                    &wildcard_cdev, &wildcard_device);
2374                if (ret)
2375                        goto done;
2376        }
2377
2378        snprintf(name, sizeof(name), "ipath%d", dd->unit);
2379        ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
2380                            &dd->user_cdev, &dd->user_device);
2381        if (ret)
2382                qib_user_remove(dd);
2383done:
2384        return ret;
2385}
2386
2387/*
2388 * Create per-unit files in /dev
2389 */
2390int qib_device_create(struct qib_devdata *dd)
2391{
2392        int r, ret;
2393
2394        r = qib_user_add(dd);
2395        ret = qib_diag_add(dd);
2396        if (r && !ret)
2397                ret = r;
2398        return ret;
2399}
2400
2401/*
2402 * Remove per-unit files in /dev
2403 * Returns void; the core kernel reports no errors for these calls.
2404 */
2405void qib_device_remove(struct qib_devdata *dd)
2406{
2407        qib_user_remove(dd);
2408        qib_diag_remove(dd);
2409}
2410