linux/drivers/misc/cxl/fault.c
/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "cxl" "."
#include <asm/current.h>
#include <asm/copro.h>
#include <asm/mmu.h>

#include "cxl.h"
#include "trace.h"

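/* An SSTE matches when both its VSID and ESID agree with the calculated SLB */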
static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
{
        return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
                (sste->esid_data == cpu_to_be64(slb->esid)));
}

/*
 * This finds a free SSTE for the given SLB, selects an entry to cast out if
 * the group is full, or returns NULL if the SLB is already in the segment
 * table.
 */
static struct cxl_sste *find_free_sste(struct cxl_context *ctx,
                                       struct copro_slb *slb)
{
        struct cxl_sste *primary, *sste, *ret = NULL;
        unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
        unsigned int entry;
        unsigned int hash;

        if (slb->vsid & SLB_VSID_B_1T)
                hash = (slb->esid >> SID_SHIFT_1T) & mask;
        else /* 256M */
                hash = (slb->esid >> SID_SHIFT) & mask;

        primary = ctx->sstp + (hash << 3);

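        /*
         * Scan the 8-entry group: remember the first invalid (free) slot,
         * but bail out if this SLB is already present.
         */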
        for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
                if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
                        ret = sste;
                if (sste_matches(sste, slb))
                        return NULL;
        }
        if (ret)
                return ret;

        /* Nothing free, select an entry to cast out */
        ret = primary + ctx->sst_lru;
        ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;

        return ret;
}

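/*
 * Install the calculated SLB into this context's segment table, serialised
 * against other updates by the SSTE lock.
 */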
static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
{
        /* mask is the group index, we search primary and secondary here. */
        struct cxl_sste *sste;
        unsigned long flags;

        spin_lock_irqsave(&ctx->sste_lock, flags);
        sste = find_free_sste(ctx, slb);
        if (!sste)
                goto out_unlock;

        pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
                        sste - ctx->sstp, slb->vsid, slb->esid);
        trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid);

        sste->vsid_data = cpu_to_be64(slb->vsid);
        sste->esid_data = cpu_to_be64(slb->esid);
out_unlock:
        spin_unlock_irqrestore(&ctx->sste_lock, flags);
}

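/*
 * Calculate the SLB for an effective address in the given mm and, on
 * success, load it into the segment table.
 */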
static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,
                             u64 ea)
{
        struct copro_slb slb = {0, 0};
        int rc;

        if (!(rc = copro_calculate_slb(mm, ea, &slb))) {
                cxl_load_segment(ctx, &slb);
        }

        return rc;
}

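/*
 * Acknowledge the translation fault with an address error and record it on
 * the context so userspace can retrieve it, then wake any waiters.
 */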
static void cxl_ack_ae(struct cxl_context *ctx)
{
        unsigned long flags;

        cxl_ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);

        spin_lock_irqsave(&ctx->lock, flags);
        ctx->pending_fault = true;
        ctx->fault_addr = ctx->dar;
        ctx->fault_dsisr = ctx->dsisr;
        spin_unlock_irqrestore(&ctx->lock, flags);

        wake_up_all(&ctx->wq);
}

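/*
 * Handle a segment miss: fault the segment in and restart the translation,
 * or acknowledge with an address error if it cannot be resolved.
 */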
static int cxl_handle_segment_miss(struct cxl_context *ctx,
                                   struct mm_struct *mm, u64 ea)
{
        int rc;

        pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);
        trace_cxl_ste_miss(ctx, ea);

        if ((rc = cxl_fault_segment(ctx, mm, ea)))
                cxl_ack_ae(ctx);
        else {
                mb(); /* Order seg table write to TFC MMIO write */
                cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
        }

        return IRQ_HANDLED;
}

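/*
 * Handle a page fault on behalf of the AFU: resolve the fault in the mm,
 * preload the hash page table entry, then restart the translation.
 */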
static void cxl_handle_page_fault(struct cxl_context *ctx,
                                  struct mm_struct *mm, u64 dsisr, u64 dar)
{
        unsigned flt = 0;
        int result;
        unsigned long access, flags, inv_flags = 0;

        trace_cxl_pte_miss(ctx, dsisr, dar);

        if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
                pr_devel("copro_handle_mm_fault failed: %#x\n", result);
                return cxl_ack_ae(ctx);
        }

        /*
         * update_mmu_cache() will not have loaded the hash since current->trap
         * is not a 0x400 or 0x300, so just call hash_page_mm() here.
         */
        access = _PAGE_PRESENT;
        if (dsisr & CXL_PSL_DSISR_An_S)
                access |= _PAGE_RW;
        /* Grant user access for user contexts and user addresses (top bit
         * of the effective address clear) */
        if ((!ctx->kernel) || !(dar & (1ULL << 63)))
                access |= _PAGE_USER;

        if (dsisr & DSISR_NOHPTE)
                inv_flags |= HPTE_NOHPTE_UPDATE;

        local_irq_save(flags);
        hash_page_mm(mm, dar, access, 0x300, inv_flags);
        local_irq_restore(flags);

        pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
        cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
}

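/*
 * Bottom half of the translation fault interrupt. Runs from a workqueue so
 * that it can take mm locks and fault pages in on behalf of the AFU.
 */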
void cxl_handle_fault(struct work_struct *fault_work)
{
        struct cxl_context *ctx =
                container_of(fault_work, struct cxl_context, fault_work);
        u64 dsisr = ctx->dsisr;
        u64 dar = ctx->dar;
        struct task_struct *task = NULL;
        struct mm_struct *mm = NULL;

        if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
            cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
            cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
                /* Most likely explanation is harmless - a dedicated process
                 * has detached and these were cleared by the PSL purge, but
                 * warn about it just in case */
                dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
                return;
        }

        /* Early return if the context is being / has been detached */
        if (ctx->status == CLOSED) {
                cxl_ack_ae(ctx);
                return;
        }

        pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
                "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);

        if (!ctx->kernel) {
                if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
                        pr_devel("cxl_handle_fault unable to get task %i\n",
                                 pid_nr(ctx->pid));
                        cxl_ack_ae(ctx);
                        return;
                }
                if (!(mm = get_task_mm(task))) {
                        pr_devel("cxl_handle_fault unable to get mm %i\n",
                                 pid_nr(ctx->pid));
                        cxl_ack_ae(ctx);
                        goto out;
                }
        }

        if (dsisr & CXL_PSL_DSISR_An_DS)
                cxl_handle_segment_miss(ctx, mm, dar);
        else if (dsisr & CXL_PSL_DSISR_An_DM)
                cxl_handle_page_fault(ctx, mm, dsisr, dar);
        else
                WARN(1, "cxl_handle_fault has nothing to handle\n");

        if (mm)
                mmput(mm);
out:
        if (task)
                put_task_struct(task);
}

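/*
 * Prefault a single effective address (the WED, when prefault_mode is
 * CXL_PREFAULT_WED) so the AFU's first access does not take a segment miss.
 */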
static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
{
        int rc;
        struct task_struct *task;
        struct mm_struct *mm;

        if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
                pr_devel("cxl_prefault_one unable to get task %i\n",
                         pid_nr(ctx->pid));
                return;
        }
        if (!(mm = get_task_mm(task))) {
                pr_devel("cxl_prefault_one unable to get mm %i\n",
                         pid_nr(ctx->pid));
                put_task_struct(task);
                return;
        }

        rc = cxl_fault_segment(ctx, mm, ea);

        mmput(mm);
        put_task_struct(task);
}

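/*
 * Advance an effective address to the start of the next segment: 1TB or
 * 256MB, depending on the segment size encoded in the VSID.
 */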
static u64 next_segment(u64 ea, u64 vsid)
{
        if (vsid & SLB_VSID_B_1T)
                ea |= (1ULL << 40) - 1;
        else
                ea |= (1ULL << 28) - 1;

        return ea + 1;
}

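/*
 * Walk every VMA in the task's address space and preload one segment table
 * entry per segment that the VMA touches.
 */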
static void cxl_prefault_vma(struct cxl_context *ctx)
{
        u64 ea, last_esid = 0;
        struct copro_slb slb = {0, 0};
        struct vm_area_struct *vma;
        int rc;
        struct task_struct *task;
        struct mm_struct *mm;

        if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
                pr_devel("cxl_prefault_vma unable to get task %i\n",
                         pid_nr(ctx->pid));
                return;
        }
        if (!(mm = get_task_mm(task))) {
                pr_devel("cxl_prefault_vma unable to get mm %i\n",
                         pid_nr(ctx->pid));
                goto out1;
        }

        down_read(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                for (ea = vma->vm_start; ea < vma->vm_end;
                                ea = next_segment(ea, slb.vsid)) {
                        rc = copro_calculate_slb(mm, ea, &slb);
                        if (rc)
                                continue;

                        if (last_esid == slb.esid)
                                continue;

                        cxl_load_segment(ctx, &slb);
                        last_esid = slb.esid;
                }
        }
        up_read(&mm->mmap_sem);

        mmput(mm);
out1:
        put_task_struct(task);
}

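/*
 * Preload segment table entries according to the AFU's configured prefault
 * mode: just the work element descriptor's segment, or every mapped segment.
 */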
void cxl_prefault(struct cxl_context *ctx, u64 wed)
{
        switch (ctx->afu->prefault_mode) {
        case CXL_PREFAULT_WED:
                cxl_prefault_one(ctx, wed);
                break;
        case CXL_PREFAULT_ALL:
                cxl_prefault_vma(ctx);
                break;
        default:
                break;
        }
}