linux/arch/powerpc/platforms/pseries/dtl.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtual Processor Dispatch Trace Log
 *
 * (C) Copyright IBM Corporation 2009
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/smp.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <asm/firmware.h>
#include <asm/dtl.h>
#include <asm/lppaca.h>
#include <asm/plpar_wrappers.h>
#include <asm/machdep.h>

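/*
 * Per-cpu state backing each debugfs file: the buffer exposed to
 * userspace, its size in entries, and the index of the next entry to
 * hand to the reader. The lock serialises enable/disable against read.
 */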
struct dtl {
        struct dtl_entry        *buf;
        int                     cpu;
        int                     buf_entries;
        u64                     last_idx;
        spinlock_t              lock;
};
static DEFINE_PER_CPU(struct dtl, cpu_dtl);

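/* Events to log; runtime-tunable via the dtl_event_mask debugfs file. */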
static u8 dtl_event_mask = DTL_LOG_ALL;


/*
 * Size of per-cpu log buffers. Firmware requires that the buffer does
 * not cross a 4k boundary.
 */
static int dtl_buf_entries = N_DISPATCH_LOG;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
struct dtl_ring {
        u64     write_index;
        struct dtl_entry *write_ptr;
        struct dtl_entry *buf;
        struct dtl_entry *buf_end;
};

static DEFINE_PER_CPU(struct dtl_ring, dtl_rings);

static atomic_t dtl_count;

/*
 * The cpu accounting code controls the DTL ring buffer, and we get
 * given entries as they are processed.
 */
static void consume_dtle(struct dtl_entry *dtle, u64 index)
{
        struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
        struct dtl_entry *wp = dtlr->write_ptr;
        struct lppaca *vpa = local_paca->lppaca_ptr;

        if (!wp)
                return;

        *wp = *dtle;
        barrier();

        /* check for hypervisor ring buffer overflow, ignore this entry if so */
        if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
                return;

        ++wp;
        if (wp == dtlr->buf_end)
                wp = dtlr->buf;
        dtlr->write_ptr = wp;

        /* incrementing write_index makes the new entry visible */
        smp_wmb();
        ++dtlr->write_index;
}

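/*
 * Start mirroring this cpu's hypervisor-owned log into our buffer:
 * the cpu accounting code feeds us entries through consume_dtle().
 */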
static int dtl_start(struct dtl *dtl)
{
        struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

        dtlr->buf = dtl->buf;
        dtlr->buf_end = dtl->buf + dtl->buf_entries;
        dtlr->write_index = 0;

        /* setting write_ptr enables logging into our buffer */
        smp_wmb();
        dtlr->write_ptr = dtl->buf;

        /* enable event logging */
        lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;

        dtl_consumer = consume_dtle;
        atomic_inc(&dtl_count);
        return 0;
}

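/* Stop mirroring entries; drop the consumer hook once the last reader is gone. */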
static void dtl_stop(struct dtl *dtl)
{
        struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

        dtlr->write_ptr = NULL;
        smp_wmb();

        dtlr->buf = NULL;

        /* restore dtl_enable_mask */
        lppaca_of(dtl->cpu).dtl_enable_mask = DTL_LOG_PREEMPT;

        if (atomic_dec_and_test(&dtl_count))
                dtl_consumer = NULL;
}

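/* Number of entries mirrored so far, i.e. the next write position. */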
static u64 dtl_current_index(struct dtl *dtl)
{
        return per_cpu(dtl_rings, dtl->cpu).write_index;
}

#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

static int dtl_start(struct dtl *dtl)
{
        unsigned long addr;
        int ret, hwcpu;

        /*
         * Register our dtl buffer with the hypervisor. The HV expects the
         * buffer size to be passed in the second word of the buffer.
         */
        ((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES);

        hwcpu = get_hard_smp_processor_id(dtl->cpu);
        addr = __pa(dtl->buf);
        ret = register_dtl(hwcpu, addr);
        if (ret) {
                printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) failed with %d\n",
                       __func__, dtl->cpu, hwcpu, ret);
                return -EIO;
        }

        /* set our initial buffer indices */
        lppaca_of(dtl->cpu).dtl_idx = 0;

        /*
         * Ensure that our updates to the lppaca fields have occurred before
         * we actually enable the logging.
         */
        smp_wmb();

        /* enable event logging */
        lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;

        return 0;
}

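/* Disable event logging and unregister the buffer from the hypervisor. */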
static void dtl_stop(struct dtl *dtl)
{
        int hwcpu = get_hard_smp_processor_id(dtl->cpu);

        lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;

        unregister_dtl(hwcpu);
}

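/* The hypervisor maintains the entry count directly in the lppaca. */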
static u64 dtl_current_index(struct dtl *dtl)
{
        return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

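/*
 * Allocate a per-cpu buffer and start logging. Fails with -EBUSY if
 * this cpu's log is already open, or if a conflicting user holds
 * dtl_access_lock for writing.
 */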
static int dtl_enable(struct dtl *dtl)
{
        long int n_entries;
        long int rc;
        struct dtl_entry *buf = NULL;

        if (!dtl_cache)
                return -ENOMEM;

        /* only allow one reader */
        if (dtl->buf)
                return -EBUSY;

        /* ensure there are no other conflicting dtl users */
        if (!read_trylock(&dtl_access_lock))
                return -EBUSY;

        n_entries = dtl_buf_entries;
        buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
        if (!buf) {
                printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
                                __func__, dtl->cpu);
                read_unlock(&dtl_access_lock);
                return -ENOMEM;
        }

        spin_lock(&dtl->lock);
        rc = -EBUSY;
        if (!dtl->buf) {
                /* store the original allocation size for use during read */
                dtl->buf_entries = n_entries;
                dtl->buf = buf;
                dtl->last_idx = 0;
                rc = dtl_start(dtl);
                if (rc)
                        dtl->buf = NULL;
        }
        spin_unlock(&dtl->lock);

        if (rc) {
                read_unlock(&dtl_access_lock);
                kmem_cache_free(dtl_cache, buf);
        }

        return rc;
}

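/* Stop logging and free the buffer; the inverse of dtl_enable(). */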
static void dtl_disable(struct dtl *dtl)
{
        spin_lock(&dtl->lock);
        dtl_stop(dtl);
        kmem_cache_free(dtl_cache, dtl->buf);
        dtl->buf = NULL;
        dtl->buf_entries = 0;
        spin_unlock(&dtl->lock);
        read_unlock(&dtl_access_lock);
}

/* file interface */

static int dtl_file_open(struct inode *inode, struct file *filp)
{
        struct dtl *dtl = inode->i_private;
        int rc;

        rc = dtl_enable(dtl);
        if (rc)
                return rc;

        filp->private_data = dtl;
        return 0;
}

static int dtl_file_release(struct inode *inode, struct file *filp)
{
        struct dtl *dtl = inode->i_private;
        dtl_disable(dtl);
        return 0;
}

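/*
 * Copy out as many whole entries as the caller asked for and the log
 * currently holds. If the hypervisor has lapped our reader since the
 * last read, skip ahead to the oldest entry still present.
 */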
static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
                loff_t *pos)
{
        long int rc, n_read, n_req, read_size;
        struct dtl *dtl;
        u64 cur_idx, last_idx, i;

        if ((len % sizeof(struct dtl_entry)) != 0)
                return -EINVAL;

        dtl = filp->private_data;

        /* requested number of entries to read */
        n_req = len / sizeof(struct dtl_entry);

        /* actual number of entries read */
        n_read = 0;

        spin_lock(&dtl->lock);

        cur_idx = dtl_current_index(dtl);
        last_idx = dtl->last_idx;

        if (last_idx + dtl->buf_entries <= cur_idx)
                last_idx = cur_idx - dtl->buf_entries + 1;

        if (last_idx + n_req > cur_idx)
                n_req = cur_idx - last_idx;

        if (n_req > 0)
                dtl->last_idx = last_idx + n_req;

        spin_unlock(&dtl->lock);

        if (n_req <= 0)
                return 0;

        i = last_idx % dtl->buf_entries;

        /* read the tail of the buffer if we've wrapped */
        if (i + n_req > dtl->buf_entries) {
                read_size = dtl->buf_entries - i;

                rc = copy_to_user(buf, &dtl->buf[i],
                                read_size * sizeof(struct dtl_entry));
                if (rc)
                        return -EFAULT;

                i = 0;
                n_req -= read_size;
                n_read += read_size;
                buf += read_size * sizeof(struct dtl_entry);
        }

        /* .. and now the head */
        rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry));
        if (rc)
                return -EFAULT;

        n_read += n_req;

        return n_read * sizeof(struct dtl_entry);
}

static const struct file_operations dtl_fops = {
        .open           = dtl_file_open,
        .release        = dtl_file_release,
        .read           = dtl_file_read,
        .llseek         = no_llseek,
};

static struct dentry *dtl_dir;

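/* Create one read-only debugfs file per cpu, named cpu-<n>. */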
static void dtl_setup_file(struct dtl *dtl)
{
        char name[10];

        sprintf(name, "cpu-%d", dtl->cpu);

        debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
}

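/* Only register the interface on shared-processor LPAR (SPLPAR) firmware. */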
static int dtl_init(void)
{
        int i;

        if (!firmware_has_feature(FW_FEATURE_SPLPAR))
                return -ENODEV;

        /* set up common debugfs structure */

        dtl_dir = debugfs_create_dir("dtl", arch_debugfs_dir);

        debugfs_create_x8("dtl_event_mask", 0600, dtl_dir, &dtl_event_mask);
        debugfs_create_u32("dtl_buf_entries", 0400, dtl_dir, &dtl_buf_entries);

        /* set up the per-cpu log structures */
        for_each_possible_cpu(i) {
                struct dtl *dtl = &per_cpu(cpu_dtl, i);
                spin_lock_init(&dtl->lock);
                dtl->cpu = i;

                dtl_setup_file(dtl);
        }

        return 0;
}
machine_arch_initcall(pseries, dtl_init);