linux/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Jonathan Kim <jonathan.kim@amd.com>
 *
 */

#include <linux/perf_event.h>
#include <linux/init.h>
#include "amdgpu.h"
#include "amdgpu_pmu.h"
#include "df_v3_6.h"

#define PMU_NAME_SIZE 32

/* tracks one registered PMU of a given type for a given device */
struct amdgpu_pmu_entry {
        struct list_head entry;
        struct amdgpu_device *adev;
        struct pmu pmu;
        unsigned int pmu_perf_type;
};

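/*
 * Every registered PMU, across all devices, lives on this list; entries
 * are added in init_pmu_by_type() and removed in amdgpu_pmu_fini().
 */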
static LIST_HEAD(amdgpu_pmu_list);

/* initialize perf counter */
static int amdgpu_perf_event_init(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        /* only accept events created against this PMU's dynamically assigned type */
        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /* update the hw_perf_event struct with config data */
        hwc->config = event->attr.config;

        return 0;
}

/* start perf counter */
static void amdgpu_perf_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct amdgpu_pmu_entry *pe = container_of(event->pmu,
                                                  struct amdgpu_pmu_entry,
                                                  pmu);

        if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
                return;

        WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
        hwc->state = 0;

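        /*
         * In the df_v3_6 implementation the third pmc_start() argument
         * distinguishes counter allocation (1) from counting enable (0).
         * Without PERF_EF_RELOAD the counter has not been allocated by
         * ->add() yet, so set it up before enabling it.
         */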
        switch (pe->pmu_perf_type) {
        case PERF_TYPE_AMDGPU_DF:
                if (!(flags & PERF_EF_RELOAD))
                        pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 1);

                pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 0);
                break;
        default:
                break;
        }

        perf_event_update_userpage(event);
}

/* read perf counter */
static void amdgpu_perf_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct amdgpu_pmu_entry *pe = container_of(event->pmu,
                                                  struct amdgpu_pmu_entry,
                                                  pmu);
        u64 count, prev;

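        /*
         * Retry until prev_count is published atomically so that
         * concurrent readers each account their own delta exactly once.
         */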
        do {
                prev = local64_read(&hwc->prev_count);

                switch (pe->pmu_perf_type) {
                case PERF_TYPE_AMDGPU_DF:
                        pe->adev->df.funcs->pmc_get_count(pe->adev, hwc->config,
                                                          &count);
                        break;
                default:
                        count = 0;
                        break;
                }
        } while (local64_cmpxchg(&hwc->prev_count, prev, count) != prev);

        local64_add(count - prev, &event->count);
}

/* stop perf counter */
static void amdgpu_perf_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct amdgpu_pmu_entry *pe = container_of(event->pmu,
                                                  struct amdgpu_pmu_entry,
                                                  pmu);

        if (hwc->state & PERF_HES_UPTODATE)
                return;

        switch (pe->pmu_perf_type) {
        case PERF_TYPE_AMDGPU_DF:
                pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, 0);
                break;
        default:
                break;
        }

        WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
        hwc->state |= PERF_HES_STOPPED;

        if (hwc->state & PERF_HES_UPTODATE)
                return;

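        /* fold the final hardware count into event->count */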
        amdgpu_perf_read(event);
        hwc->state |= PERF_HES_UPTODATE;
}

/* add perf counter */
static int amdgpu_perf_add(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        int retval;
        struct amdgpu_pmu_entry *pe = container_of(event->pmu,
                                                  struct amdgpu_pmu_entry,
                                                  pmu);

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

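        /* reserve a hardware counter for this event before it can be started */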
        switch (pe->pmu_perf_type) {
        case PERF_TYPE_AMDGPU_DF:
                retval = pe->adev->df.funcs->pmc_start(pe->adev,
                                                       hwc->config, 1);
                break;
        default:
                return 0;
        }

        if (retval)
                return retval;

        if (flags & PERF_EF_START)
                amdgpu_perf_start(event, PERF_EF_RELOAD);

        return retval;
}

/* delete perf counter */
static void amdgpu_perf_del(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct amdgpu_pmu_entry *pe = container_of(event->pmu,
                                                  struct amdgpu_pmu_entry,
                                                  pmu);

        amdgpu_perf_stop(event, PERF_EF_UPDATE);

        switch (pe->pmu_perf_type) {
        case PERF_TYPE_AMDGPU_DF:
                pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, 1);
                break;
        default:
                break;
        }

        perf_event_update_userpage(event);
}

/* Vega20 PMUs */

/* init pmu tracking per pmu type */
static int init_pmu_by_type(struct amdgpu_device *adev,
                  const struct attribute_group *attr_groups[],
                  char *pmu_type_name, char *pmu_file_prefix,
                  unsigned int pmu_perf_type,
                  unsigned int num_counters)
{
        char pmu_name[PMU_NAME_SIZE];
        struct amdgpu_pmu_entry *pmu_entry;
        int ret = 0;

        pmu_entry = kzalloc(sizeof(struct amdgpu_pmu_entry), GFP_KERNEL);
        if (!pmu_entry)
                return -ENOMEM;

        pmu_entry->adev = adev;
        pmu_entry->pmu = (struct pmu){
                .event_init = amdgpu_perf_event_init,
                .add = amdgpu_perf_add,
                .del = amdgpu_perf_del,
                .start = amdgpu_perf_start,
                .stop = amdgpu_perf_stop,
                .read = amdgpu_perf_read,
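                /* uncore-style counters: not bound to any task context */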
                .task_ctx_nr = perf_invalid_context,
        };

        pmu_entry->pmu.attr_groups = attr_groups;
        pmu_entry->pmu_perf_type = pmu_perf_type;
        snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d",
                                pmu_file_prefix, adev->ddev->primary->index);

        ret = perf_pmu_register(&pmu_entry->pmu, pmu_name, -1);
        if (ret) {
                kfree(pmu_entry);
                pr_warn("Error initializing AMDGPU %s PMUs.\n", pmu_type_name);
                return ret;
        }

        pr_info("Detected AMDGPU %s Counters. # of Counters = %d.\n",
                        pmu_type_name, num_counters);

        list_add_tail(&pmu_entry->entry, &amdgpu_pmu_list);

        return 0;
}

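/*
 * Registered counters are consumed through the standard perf interface,
 * e.g. (with <event> standing in for an event name defined in df_v3_6.c,
 * and the DRM primary node index appended to the PMU name):
 *
 *   perf stat -e amdgpu_df_0/<event>/ -a sleep 1
 */
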
/* init amdgpu_pmu */
int amdgpu_pmu_init(struct amdgpu_device *adev)
{
        int ret = 0;

        switch (adev->asic_type) {
        case CHIP_VEGA20:
                /* init df */
                ret = init_pmu_by_type(adev, df_v3_6_attr_groups,
                                       "DF", "amdgpu_df", PERF_TYPE_AMDGPU_DF,
                                       DF_V3_6_MAX_COUNTERS);

                /* other PMU types go here */
                break;
        default:
                return 0;
        }

        return ret;
}

/* destroy all pmu data associated with target device */
void amdgpu_pmu_fini(struct amdgpu_device *adev)
{
        struct amdgpu_pmu_entry *pe, *temp;

        list_for_each_entry_safe(pe, temp, &amdgpu_pmu_list, entry) {
                if (pe->adev == adev) {
                        list_del(&pe->entry);
                        perf_pmu_unregister(&pe->pmu);
                        kfree(pe);
                }
        }
}