linux/drivers/misc/sgi-gru/grufault.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SN Platform GRU Driver
 *
 *              FAULT HANDLER FOR GRU DETECTED TLB MISSES
 *
 * This file contains code that handles TLB misses within the GRU.
 * These misses are reported either via interrupts or user polling of
 * the user CB.
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <linux/sync_core.h>
#include <linux/prefetch.h>
#include "gru.h"
#include "grutables.h"
#include "grulib.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/* Return codes for vtop functions */
#define VTOP_SUCCESS               0
#define VTOP_INVALID               -1
#define VTOP_RETRY                 -2

/*
 * Test if a physical address is a valid GRU GSEG address.
 */
static inline int is_gru_paddr(unsigned long paddr)
{
        return paddr >= gru_start_paddr && paddr < gru_end_paddr;
}

/*
 * Find the vma of a GRU segment. Caller must hold mmap_lock.
 */
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
        struct vm_area_struct *vma;

        vma = vma_lookup(current->mm, vaddr);
        if (vma && vma->vm_ops == &gru_vm_ops)
                return vma;
        return NULL;
}

/*
 * Find and lock the gts that contains the specified user vaddr.
 *
 * Returns:
 *      - *gts with the mmap_lock locked for read and the GTS locked.
 *      - NULL if vaddr is invalid or is not a valid GSEG vaddr.
 */
static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct gru_thread_state *gts = NULL;

        mmap_read_lock(mm);
        vma = gru_find_vma(vaddr);
        if (vma)
                gts = gru_find_thread_state(vma, TSID(vaddr, vma));
        if (gts)
                mutex_lock(&gts->ts_ctxlock);
        else
                mmap_read_unlock(mm);
        return gts;
}

static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct gru_thread_state *gts = ERR_PTR(-EINVAL);

        mmap_write_lock(mm);
        vma = gru_find_vma(vaddr);
        if (!vma)
                goto err;

        gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
        if (IS_ERR(gts))
                goto err;
        mutex_lock(&gts->ts_ctxlock);
        mmap_write_downgrade(mm);
        return gts;

err:
        mmap_write_unlock(mm);
        return gts;
}

/*
 * Unlock a GTS that was previously locked with gru_find_lock_gts().
 */
static void gru_unlock_gts(struct gru_thread_state *gts)
{
        mutex_unlock(&gts->ts_ctxlock);
        mmap_read_unlock(current->mm);
}
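
/*
 * Illustrative usage sketch (not part of the driver): callers pair
 * gru_find_lock_gts() with gru_unlock_gts(), e.g.
 *
 *      gts = gru_find_lock_gts(vaddr);
 *      if (!gts)
 *              return -EINVAL;
 *      ...operate on the gts; mmap_lock is held for read...
 *      gru_unlock_gts(gts);
 *
 * gru_alloc_locked_gts() returns with the same locks held (it downgrades
 * its write lock to read) and is unlocked the same way.
 */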

/*
 * Set a CB.istatus to active using a user virtual address. This must be done
 * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
 * If the line is evicted, the status may be lost. The in-cache update
 * is necessary to prevent the user from seeing a stale cb.istatus that will
 * change as soon as the TFH restart is complete. Races may cause an
 * occasional failure to clear the cb.istatus, but that is ok.
 */
static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
{
        if (cbk)
                cbk->istatus = CBS_ACTIVE;
}

/*
 * Read & clear a TFM
 *
 * The GRU has an array of fault maps. A map is private to a cpu.
 * Only one cpu will be accessing a cpu's fault map.
 *
 * This function scans the cpu-private fault map & clears all bits that
 * are set. The function returns a bitmap that indicates the bits that
 * were cleared. Note that since the maps may be updated asynchronously by
 * the GRU, atomic operations must be used to clear bits.
 */
static void get_clear_fault_map(struct gru_state *gru,
                                struct gru_tlb_fault_map *imap,
                                struct gru_tlb_fault_map *dmap)
{
        unsigned long i, k;
        struct gru_tlb_fault_map *tfm;

        tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
        prefetchw(tfm);         /* Helps on hardware, required for emulator */
        for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
                k = tfm->fault_bits[i];
                if (k)
                        k = xchg(&tfm->fault_bits[i], 0UL);
                imap->fault_bits[i] = k;
                k = tfm->done_bits[i];
                if (k)
                        k = xchg(&tfm->done_bits[i], 0UL);
                dmap->fault_bits[i] = k;
        }

        /*
         * Not functionally required but helps performance. (Required
         * on emulator)
         */
        gru_flush_cache(tfm);
}
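
/*
 * Why xchg() above (explanatory note, not driver code): a plain
 * read-then-clear sequence could lose a fault that the GRU posts
 * between the two operations:
 *
 *      k = tfm->fault_bits[i];         (GRU may set another bit here)
 *      tfm->fault_bits[i] = 0;         (that new bit is silently lost)
 *
 * The atomic exchange reads and zeroes the word in a single step, so
 * every bit set by the hardware is observed exactly once.
 */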

/*
 * Atomic (interrupt context) & non-atomic (user context) functions to
 * convert a vaddr into a physical address. The size of the page
 * is returned in pageshift.
 *      returns:
 *                0 - successful
 *              < 0 - error code
 *                1 - (atomic only) try again in non-atomic context
 */
static int non_atomic_pte_lookup(struct vm_area_struct *vma,
                                 unsigned long vaddr, int write,
                                 unsigned long *paddr, int *pageshift)
{
        struct page *page;

#ifdef CONFIG_HUGETLB_PAGE
        *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
        *pageshift = PAGE_SHIFT;
#endif
        if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
                return -EFAULT;
        *paddr = page_to_phys(page);
        put_page(page);
        return 0;
}

/*
 * atomic_pte_lookup
 *
 * Convert a user virtual address to a physical address.
 * Only supports Intel large pages (2MB only) on x86_64.
 *      ZZZ - hugepage support is incomplete
 *
 * NOTE: mmap_lock is already held on entry to this function. This
 * guarantees existence of the page tables.
 */
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
        int write, unsigned long *paddr, int *pageshift)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t pte;

        pgdp = pgd_offset(vma->vm_mm, vaddr);
        if (unlikely(pgd_none(*pgdp)))
                goto err;

        p4dp = p4d_offset(pgdp, vaddr);
        if (unlikely(p4d_none(*p4dp)))
                goto err;

        pudp = pud_offset(p4dp, vaddr);
        if (unlikely(pud_none(*pudp)))
                goto err;

        pmdp = pmd_offset(pudp, vaddr);
        if (unlikely(pmd_none(*pmdp)))
                goto err;
#ifdef CONFIG_X86_64
        if (unlikely(pmd_large(*pmdp)))
                pte = *(pte_t *) pmdp;
        else
#endif
                pte = *pte_offset_kernel(pmdp, vaddr);

        if (unlikely(!pte_present(pte) ||
                     (write && (!pte_write(pte) || !pte_dirty(pte)))))
                return 1;

        *paddr = pte_pfn(pte) << PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
        *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
        *pageshift = PAGE_SHIFT;
#endif
        return 0;

err:
        return 1;
}

static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
                    int write, int atomic, unsigned long *gpa, int *pageshift)
{
        struct mm_struct *mm = gts->ts_mm;
        struct vm_area_struct *vma;
        unsigned long paddr;
        int ret, ps;

        vma = find_vma(mm, vaddr);
        if (!vma)
                goto inval;

        /*
         * Atomic lookup is faster & usually works even if called in non-atomic
         * context.
         */
        rmb();  /* Must check ms_range_active before loading PTEs */
        ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
        if (ret) {
                if (atomic)
                        goto upm;
                if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
                        goto inval;
        }
        if (is_gru_paddr(paddr))
                goto inval;
        paddr = paddr & ~((1UL << ps) - 1);
        *gpa = uv_soc_phys_ram_to_gpa(paddr);
        *pageshift = ps;
        return VTOP_SUCCESS;

inval:
        return VTOP_INVALID;
upm:
        return VTOP_RETRY;
}
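
/*
 * Caller-side sketch of the vtop contract (illustrative only; it mirrors
 * the checks in gru_try_dropin() below, not additional driver code):
 *
 *      ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
 *      if (ret == VTOP_INVALID)
 *              ...fail the access; raise an exception on the CB...
 *      else if (ret == VTOP_RETRY)
 *              ...atomic lookup failed; retry in user (UPM) context...
 *      else
 *              ...VTOP_SUCCESS: gpa/pageshift are ready for a TLB dropin...
 */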

/*
 * Flush a CBE from cache. The CBE is clean in the cache. Dirty the
 * CBE cacheline so that the line will be written back to home agent.
 * Otherwise the line may be silently dropped. This has no impact
 * except on performance.
 */
static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
{
        if (unlikely(cbe)) {
                cbe->cbrexecstatus = 0;         /* make CL dirty */
                gru_flush_cache(cbe);
        }
}

/*
 * Preload the TLB with entries that may be required. Currently, preloading
 * is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to
 * the end of the bcopy transfer, whichever is smaller.
 */
static void gru_preload_tlb(struct gru_state *gru,
                        struct gru_thread_state *gts, int atomic,
                        unsigned long fault_vaddr, int asid, int write,
                        unsigned char tlb_preload_count,
                        struct gru_tlb_fault_handle *tfh,
                        struct gru_control_block_extended *cbe)
{
        unsigned long vaddr = 0, gpa;
        int ret, pageshift;

        if (cbe->opccpy != OP_BCOPY)
                return;

        if (fault_vaddr == cbe->cbe_baddr0)
                vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
        else if (fault_vaddr == cbe->cbe_baddr1)
                vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;

        fault_vaddr &= PAGE_MASK;
        vaddr &= PAGE_MASK;
        vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);

        while (vaddr > fault_vaddr) {
                ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
                if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
                                          GRU_PAGESIZE(pageshift)))
                        return;
                gru_dbg(grudev,
                        "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
                        atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
                        vaddr, asid, write, pageshift, gpa);
                vaddr -= PAGE_SIZE;
                STAT(tlb_preload_page);
        }
}
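
/*
 * Worked example with hypothetical numbers: suppose a BCOPY faults at
 * cbe_baddr0 and cbe_src_cl is 64 source cache lines. The transfer then
 * ends 64 * GRU_CACHE_LINE_BYTES - 1 bytes past the fault address. If
 * tlb_preload_count is 8, the min() above caps the window at 8 pages,
 * and the loop drops in entries from the end of the window back down
 * to, but not including, the faulting page itself (which the normal
 * dropin path handles).
 */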

/*
 * Drop a TLB entry into the GRU. The fault is described by info in a TFH.
 *      Input:
 *              cb    Address of user CBR. Null if not running in user context
 *      Return:
 *                0 = dropin, exception, or switch to UPM successful
 *                1 = range invalidate active
 *              < 0 = error code
 */
static int gru_try_dropin(struct gru_state *gru,
                          struct gru_thread_state *gts,
                          struct gru_tlb_fault_handle *tfh,
                          struct gru_instruction_bits *cbk)
{
        struct gru_control_block_extended *cbe = NULL;
        unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
        int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
        unsigned long gpa = 0, vaddr = 0;

        /*
         * NOTE: The GRU contains magic hardware that eliminates races between
         * TLB invalidates and TLB dropins. If an invalidate occurs
         * in the window between reading the TFH and the subsequent TLB dropin,
         * the dropin is ignored. This eliminates the need for additional locks.
         */

        /*
         * Prefetch the CBE if doing TLB preloading
         */
        if (unlikely(tlb_preload_count)) {
                cbe = gru_tfh_to_cbe(tfh);
                prefetchw(cbe);
        }

        /*
         * Error if TFH state is IDLE or FMM mode & the user is issuing a UPM
         * call. Might be a hardware race OR a stupid user. Ignore FMM because
         * FMM is a transient state.
         */
        if (tfh->status != TFHSTATUS_EXCEPTION) {
                gru_flush_cache(tfh);
                sync_core();
                if (tfh->status != TFHSTATUS_EXCEPTION)
                        goto failnoexception;
                STAT(tfh_stale_on_fault);
        }
        if (tfh->state == TFHSTATE_IDLE)
                goto failidle;
        if (tfh->state == TFHSTATE_MISS_FMM && cbk)
                goto failfmm;

        write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
        vaddr = tfh->missvaddr;
        asid = tfh->missasid;
        indexway = tfh->indexway;
        if (asid == 0)
                goto failnoasid;

        rmb();  /* TFH must be cache resident before reading ms_range_active */

        /*
         * TFH is cache resident - at least briefly. Fail the dropin
         * if a range invalidate is active.
         */
        if (atomic_read(&gts->ts_gms->ms_range_active))
                goto failactive;

        ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
        if (ret == VTOP_INVALID)
                goto failinval;
        if (ret == VTOP_RETRY)
                goto failupm;

        if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
                gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
                if (atomic || !gru_update_cch(gts)) {
                        gts->ts_force_cch_reload = 1;
                        goto failupm;
                }
        }

        if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
                gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe);
                gru_flush_cache_cbe(cbe);
        }

        gru_cb_set_istatus_active(cbk);
        gts->ustats.tlbdropin++;
        tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
                          GRU_PAGESIZE(pageshift));
        gru_dbg(grudev,
                "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
                " rw %d, ps %d, gpa 0x%lx\n",
                atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
                indexway, write, pageshift, gpa);
        STAT(tlb_dropin);
        return 0;

failnoasid:
        /* No asid (delayed unload). */
        STAT(tlb_dropin_fail_no_asid);
        gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
        if (!cbk)
                tfh_user_polling_mode(tfh);
        else
                gru_flush_cache(tfh);
        gru_flush_cache_cbe(cbe);
        return -EAGAIN;

failupm:
        /* Atomic failure switch CBR to UPM */
        tfh_user_polling_mode(tfh);
        gru_flush_cache_cbe(cbe);
        STAT(tlb_dropin_fail_upm);
        gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
        return 1;

failfmm:
        /* FMM state on UPM call */
        gru_flush_cache(tfh);
        gru_flush_cache_cbe(cbe);
        STAT(tlb_dropin_fail_fmm);
        gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
        return 0;

failnoexception:
        /* TFH status did not show exception pending */
        gru_flush_cache(tfh);
        gru_flush_cache_cbe(cbe);
        if (cbk)
                gru_flush_cache(cbk);
        STAT(tlb_dropin_fail_no_exception);
        gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
                tfh, tfh->status, tfh->state);
        return 0;

failidle:
        /* TFH state was idle - no miss pending */
        gru_flush_cache(tfh);
        gru_flush_cache_cbe(cbe);
        if (cbk)
                gru_flush_cache(cbk);
        STAT(tlb_dropin_fail_idle);
        gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
        return 0;

failinval:
        /* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
        tfh_exception(tfh);
        gru_flush_cache_cbe(cbe);
        STAT(tlb_dropin_fail_invalid);
        gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
        return -EFAULT;

failactive:
        /* Range invalidate active. Switch to UPM iff atomic */
        if (!cbk)
                tfh_user_polling_mode(tfh);
        else
                gru_flush_cache(tfh);
        gru_flush_cache_cbe(cbe);
        STAT(tlb_dropin_fail_range_active);
        gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
                tfh, vaddr);
        return 1;
}
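
/*
 * Return-code dispatch sketch (illustrative; gru_user_dropin() below does
 * essentially this): only a "range invalidate active" result is retried:
 *
 *      ret = gru_try_dropin(gru, gts, tfh, cbk);
 *      if (ret <= 0)
 *              return ret;     (dropin/exception done, or hard error)
 *      ...ret == 1: wait for the range invalidate to finish, then retry...
 */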

/*
 * Process an external interrupt from the GRU. This interrupt is
 * caused by a TLB miss.
 * Note that this is the interrupt handler that is registered with the
 * Linux interrupt subsystem.
 */
static irqreturn_t gru_intr(int chiplet, int blade)
{
        struct gru_state *gru;
        struct gru_tlb_fault_map imap, dmap;
        struct gru_thread_state *gts;
        struct gru_tlb_fault_handle *tfh = NULL;
        struct completion *cmp;
        int cbrnum, ctxnum;

        STAT(intr);

        gru = &gru_base[blade]->bs_grus[chiplet];
        if (!gru) {
                dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
                        raw_smp_processor_id(), chiplet);
                return IRQ_NONE;
        }
        get_clear_fault_map(gru, &imap, &dmap);
        gru_dbg(grudev,
                "cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
                smp_processor_id(), chiplet, gru->gs_gid,
                imap.fault_bits[0], imap.fault_bits[1],
                dmap.fault_bits[0], dmap.fault_bits[1]);

        for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
                STAT(intr_cbr);
                cmp = gru->gs_blade->bs_async_wq;
                if (cmp)
                        complete(cmp);
                gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
                        gru->gs_gid, cbrnum, cmp ? cmp->done : -1);
        }

        for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
                STAT(intr_tfh);
                tfh = get_tfh_by_index(gru, cbrnum);
                prefetchw(tfh); /* Helps on hdw, required for emulator */

                /*
                 * When hardware sets a bit in the faultmap, it implicitly
                 * locks the GRU context so that it cannot be unloaded.
                 * The gts cannot change until a TFH start/writestart command
                 * is issued.
                 */
                ctxnum = tfh->ctxnum;
                gts = gru->gs_gts[ctxnum];

                /* Spurious interrupts can cause this. Ignore. */
                if (!gts) {
                        STAT(intr_spurious);
                        continue;
                }

                /*
                 * This is running in interrupt context. Trylock the mmap_lock.
                 * If it fails, retry the fault in user context.
                 */
                gts->ustats.fmm_tlbmiss++;
                if (!gts->ts_force_cch_reload &&
                                        mmap_read_trylock(gts->ts_mm)) {
                        gru_try_dropin(gru, gts, tfh, NULL);
                        mmap_read_unlock(gts->ts_mm);
                } else {
                        tfh_user_polling_mode(tfh);
                        STAT(intr_mm_lock_failed);
                }
        }
        return IRQ_HANDLED;
}

irqreturn_t gru0_intr(int irq, void *dev_id)
{
        return gru_intr(0, uv_numa_blade_id());
}

irqreturn_t gru1_intr(int irq, void *dev_id)
{
        return gru_intr(1, uv_numa_blade_id());
}

irqreturn_t gru_intr_mblade(int irq, void *dev_id)
{
        int blade;

        for_each_possible_blade(blade) {
                if (uv_blade_nr_possible_cpus(blade))
                        continue;
                gru_intr(0, blade);
                gru_intr(1, blade);
        }
        return IRQ_HANDLED;
}


static int gru_user_dropin(struct gru_thread_state *gts,
                           struct gru_tlb_fault_handle *tfh,
                           void *cb)
{
        struct gru_mm_struct *gms = gts->ts_gms;
        int ret;

        gts->ustats.upm_tlbmiss++;
        while (1) {
                wait_event(gms->ms_wait_queue,
                           atomic_read(&gms->ms_range_active) == 0);
                prefetchw(tfh); /* Helps on hdw, required for emulator */
                ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
                if (ret <= 0)
                        return ret;
                STAT(call_os_wait_queue);
        }
}

/*
 * This interface is called as a result of a user detecting a "call OS" bit
 * in a user CB. Normally this means that a TLB fault has occurred.
 *      cb - user virtual address of the CB
 */
int gru_handle_user_call_os(unsigned long cb)
{
        struct gru_tlb_fault_handle *tfh;
        struct gru_thread_state *gts;
        void *cbk;
        int ucbnum, cbrnum, ret = -EINVAL;

        STAT(call_os);

        /* sanity check the cb pointer */
        ucbnum = get_cb_number((void *)cb);
        if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
                return -EINVAL;

        gts = gru_find_lock_gts(cb);
        if (!gts)
                return -EINVAL;
        gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);

        if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
                goto exit;

        gru_check_context_placement(gts);

        /*
         * CCH may contain stale data if ts_force_cch_reload is set.
         */
        if (gts->ts_gru && gts->ts_force_cch_reload) {
                gts->ts_force_cch_reload = 0;
                gru_update_cch(gts);
        }

        ret = -EAGAIN;
        cbrnum = thread_cbr_number(gts, ucbnum);
        if (gts->ts_gru) {
                tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
                cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
                                gts->ts_ctxnum, ucbnum);
                ret = gru_user_dropin(gts, tfh, cbk);
        }
exit:
        gru_unlock_gts(gts);
        return ret;
}

/*
 * Fetch the exception detail information for a CB that terminated with
 * an exception.
 */
int gru_get_exception_detail(unsigned long arg)
{
        struct control_block_extended_exc_detail excdet;
        struct gru_control_block_extended *cbe;
        struct gru_thread_state *gts;
        int ucbnum, cbrnum, ret;

        STAT(user_exception);
        if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
                return -EFAULT;

        gts = gru_find_lock_gts(excdet.cb);
        if (!gts)
                return -EINVAL;

        gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
        ucbnum = get_cb_number((void *)excdet.cb);
        if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
                ret = -EINVAL;
        } else if (gts->ts_gru) {
                cbrnum = thread_cbr_number(gts, ucbnum);
                cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
                gru_flush_cache(cbe);   /* CBE not coherent */
                sync_core();            /* make sure we have current data */
                excdet.opc = cbe->opccpy;
                excdet.exopc = cbe->exopccpy;
                excdet.ecause = cbe->ecause;
                excdet.exceptdet0 = cbe->idef1upd;
                excdet.exceptdet1 = cbe->idef3upd;
                excdet.cbrstate = cbe->cbrstate;
                excdet.cbrexecstatus = cbe->cbrexecstatus;
                gru_flush_cache_cbe(cbe);
                ret = 0;
        } else {
                ret = -EAGAIN;
        }
        gru_unlock_gts(gts);

        gru_dbg(grudev,
                "cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, "
                "exdet0 0x%lx, exdet1 0x%x\n",
                excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate, excdet.cbrexecstatus,
                excdet.ecause, excdet.exceptdet0, excdet.exceptdet1);
        if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
                ret = -EFAULT;
        return ret;
}

/*
 * User request to unload a context. Content is saved for possible reload.
 */
static int gru_unload_all_contexts(void)
{
        struct gru_thread_state *gts;
        struct gru_state *gru;
        int gid, ctxnum;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        foreach_gid(gid) {
                gru = GID_TO_GRU(gid);
                spin_lock(&gru->gs_lock);
                for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
                        gts = gru->gs_gts[ctxnum];
                        if (gts && mutex_trylock(&gts->ts_ctxlock)) {
                                spin_unlock(&gru->gs_lock);
                                gru_unload_context(gts, 1);
                                mutex_unlock(&gts->ts_ctxlock);
                                spin_lock(&gru->gs_lock);
                        }
                }
                spin_unlock(&gru->gs_lock);
        }
        return 0;
}

int gru_user_unload_context(unsigned long arg)
{
        struct gru_thread_state *gts;
        struct gru_unload_context_req req;

        STAT(user_unload_context);
        if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                return -EFAULT;

        gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);

        if (!req.gseg)
                return gru_unload_all_contexts();

        gts = gru_find_lock_gts(req.gseg);
        if (!gts)
                return -EINVAL;

        if (gts->ts_gru)
                gru_unload_context(gts, 1);
        gru_unlock_gts(gts);

        return 0;
}

/*
 * User request to flush a range of virtual addresses from the GRU TLB
 * (mainly for testing).
 */
int gru_user_flush_tlb(unsigned long arg)
{
        struct gru_thread_state *gts;
        struct gru_flush_tlb_req req;
        struct gru_mm_struct *gms;

        STAT(user_flush_tlb);
        if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                return -EFAULT;

        gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
                req.vaddr, req.len);

        gts = gru_find_lock_gts(req.gseg);
        if (!gts)
                return -EINVAL;

        gms = gts->ts_gms;
        gru_unlock_gts(gts);
        gru_flush_tlb_range(gms, req.vaddr, req.len);

        return 0;
}

/*
 * Fetch GSEG statistics.
 */
long gru_get_gseg_statistics(unsigned long arg)
{
        struct gru_thread_state *gts;
        struct gru_get_gseg_statistics_req req;

        if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                return -EFAULT;

        /*
         * The library creates arrays of contexts for threaded programs.
         * If no gts exists in the array, the context has never been used & all
         * statistics are implicitly 0.
         */
        gts = gru_find_lock_gts(req.gseg);
        if (gts) {
                memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
                gru_unlock_gts(gts);
        } else {
                memset(&req.stats, 0, sizeof(gts->ustats));
        }

        if (copy_to_user((void __user *)arg, &req, sizeof(req)))
                return -EFAULT;

        return 0;
}

/*
 * Register the current task as the user of the GSEG slice.
 * Needed for TLB fault interrupt targeting.
 */
int gru_set_context_option(unsigned long arg)
{
        struct gru_thread_state *gts;
        struct gru_set_context_option_req req;
        int ret = 0;

        STAT(set_context_option);
        if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                return -EFAULT;
        gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);

        gts = gru_find_lock_gts(req.gseg);
        if (!gts) {
                gts = gru_alloc_locked_gts(req.gseg);
                if (IS_ERR(gts))
                        return PTR_ERR(gts);
        }

        switch (req.op) {
        case sco_blade_chiplet:
                /* Select blade/chiplet for GRU context */
                if (req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB ||
                    req.val1 < -1 || req.val1 >= GRU_MAX_BLADES ||
                    (req.val1 >= 0 && !gru_base[req.val1])) {
                        ret = -EINVAL;
                } else {
                        gts->ts_user_blade_id = req.val1;
                        gts->ts_user_chiplet_id = req.val0;
                        gru_check_context_placement(gts);
                }
                break;
        case sco_gseg_owner:
                /* Register the current task as the GSEG owner */
                gts->ts_tgid_owner = current->tgid;
                break;
        case sco_cch_req_slice:
                /* Set the CCH slice option */
                gts->ts_cch_req_slice = req.val1 & 3;
                break;
        default:
                ret = -EINVAL;
        }
        gru_unlock_gts(gts);

        return ret;
}