linux/drivers/fpga/dfl-fme-pr.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Management Engine (FME) Partial Reconfiguration
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Kang Luwei <luwei.kang@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Wu Hao <hao.wu@intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Christopher Rauer <christopher.rauer@intel.com>
 *   Henry Mitchel <henry.mitchel@intel.com>
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/fpga/fpga-bridge.h>
#include <linux/fpga/fpga-region.h>
#include <linux/fpga-dfl.h>

#include "dfl.h"
#include "dfl-fme.h"
#include "dfl-fme-pr.h"

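/* Look up the dfl_fme_region created for port_id on the FME's region list. */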
static struct dfl_fme_region *
dfl_fme_region_find_by_port_id(struct dfl_fme *fme, int port_id)
{
        struct dfl_fme_region *fme_region;

        list_for_each_entry(fme_region, &fme->region_list, node)
                if (fme_region->port_id == port_id)
                        return fme_region;

        return NULL;
}

static int dfl_fme_region_match(struct device *dev, const void *data)
{
        return dev->parent == data;
}

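/*
 * Find the fpga_region device for port_id. On success a reference to the
 * region device is taken via fpga_region_class_find(); the caller must drop
 * it with put_device() when done.
 */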
static struct fpga_region *dfl_fme_region_find(struct dfl_fme *fme, int port_id)
{
        struct dfl_fme_region *fme_region;
        struct fpga_region *region;

        fme_region = dfl_fme_region_find_by_port_id(fme, port_id);
        if (!fme_region)
                return NULL;

        region = fpga_region_class_find(NULL, &fme_region->region->dev,
                                        dfl_fme_region_match);
        if (!region)
                return NULL;

        return region;
}

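/*
 * Handle the DFL_FPGA_FME_PORT_PR ioctl: copy the partial bitstream from
 * userspace into a kernel buffer, look up the fpga_region that belongs to
 * the requested port and program it via fpga_region_program_fpga().
 */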
static int fme_pr(struct platform_device *pdev, unsigned long arg)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
        void __user *argp = (void __user *)arg;
        struct dfl_fpga_fme_port_pr port_pr;
        struct fpga_image_info *info;
        struct fpga_region *region;
        void __iomem *fme_hdr;
        struct dfl_fme *fme;
        unsigned long minsz;
        void *buf = NULL;
        size_t length;
        int ret = 0;
        u64 v;

        minsz = offsetofend(struct dfl_fpga_fme_port_pr, buffer_address);

        if (copy_from_user(&port_pr, argp, minsz))
                return -EFAULT;

        if (port_pr.argsz < minsz || port_pr.flags)
                return -EINVAL;

        /* get fme header region */
        fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
                                               FME_FEATURE_ID_HEADER);

        /* check port id */
        v = readq(fme_hdr + FME_HDR_CAP);
        if (port_pr.port_id >= FIELD_GET(FME_CAP_NUM_PORTS, v)) {
                dev_dbg(&pdev->dev, "port number more than maximum\n");
                return -EINVAL;
        }

        /*
         * align PR buffer per PR bandwidth, as HW ignores the extra padding
         * data automatically.
         */
        length = ALIGN(port_pr.buffer_size, 4);

        buf = vmalloc(length);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf,
                           (void __user *)(unsigned long)port_pr.buffer_address,
                           port_pr.buffer_size)) {
                ret = -EFAULT;
                goto free_exit;
        }

        /* prepare fpga_image_info for PR */
        info = fpga_image_info_alloc(&pdev->dev);
        if (!info) {
                ret = -ENOMEM;
                goto free_exit;
        }

        info->flags |= FPGA_MGR_PARTIAL_RECONFIG;

        mutex_lock(&pdata->lock);
        fme = dfl_fpga_pdata_get_private(pdata);
        /* fme device has been unregistered. */
        if (!fme) {
                ret = -EINVAL;
                goto unlock_exit;
        }

        region = dfl_fme_region_find(fme, port_pr.port_id);
        if (!region) {
                ret = -EINVAL;
                goto unlock_exit;
        }

        fpga_image_info_free(region->info);

        info->buf = buf;
        info->count = length;
        info->region_id = port_pr.port_id;
        region->info = info;

        ret = fpga_region_program_fpga(region);

        /*
         * Userspace is allowed to reset the PR region's logic by disabling
         * and re-enabling the bridge to clear things out between acceleration
         * runs. So there is no need to hold the bridges after partial
         * reconfiguration.
         */
        if (region->get_bridges)
                fpga_bridges_put(&region->bridge_list);

        put_device(&region->dev);
unlock_exit:
        mutex_unlock(&pdata->lock);
free_exit:
        vfree(buf);
        return ret;
}

/**
 * dfl_fme_create_mgr - create fpga mgr platform device as child device
 *
 * @pdata: fme platform_device's pdata
 * @feature: fme-mgr sub feature whose mapped registers back the manager
 *
 * Return: mgr platform device if successful, and error code otherwise.
 */
static struct platform_device *
dfl_fme_create_mgr(struct dfl_feature_platform_data *pdata,
                   struct dfl_feature *feature)
{
        struct platform_device *mgr, *fme = pdata->dev;
        struct dfl_fme_mgr_pdata mgr_pdata;
        int ret = -ENOMEM;

        if (!feature->ioaddr)
                return ERR_PTR(-ENODEV);

        mgr_pdata.ioaddr = feature->ioaddr;

        /*
         * Each FME has only one fpga-mgr, so allocate platform device using
         * the same FME platform device id.
         */
        mgr = platform_device_alloc(DFL_FPGA_FME_MGR, fme->id);
        if (!mgr)
                return ERR_PTR(ret);

        mgr->dev.parent = &fme->dev;

        ret = platform_device_add_data(mgr, &mgr_pdata, sizeof(mgr_pdata));
        if (ret)
                goto create_mgr_err;

        ret = platform_device_add(mgr);
        if (ret)
                goto create_mgr_err;

        return mgr;

create_mgr_err:
        platform_device_put(mgr);
        return ERR_PTR(ret);
}

/**
 * dfl_fme_destroy_mgr - destroy fpga mgr platform device
 * @pdata: fme platform device's pdata
 */
static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
{
        struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);

        platform_device_unregister(priv->mgr);
}

/**
 * dfl_fme_create_bridge - create fme fpga bridge platform device as child
 *
 * @pdata: fme platform device's pdata
 * @port_id: port id for the bridge to be created.
 *
 * Return: bridge platform device if successful, and error code otherwise.
 */
static struct dfl_fme_bridge *
dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
{
        struct device *dev = &pdata->dev->dev;
        struct dfl_fme_br_pdata br_pdata;
        struct dfl_fme_bridge *fme_br;
        int ret = -ENOMEM;

        fme_br = devm_kzalloc(dev, sizeof(*fme_br), GFP_KERNEL);
        if (!fme_br)
                return ERR_PTR(ret);

        br_pdata.cdev = pdata->dfl_cdev;
        br_pdata.port_id = port_id;

        fme_br->br = platform_device_alloc(DFL_FPGA_FME_BRIDGE,
                                           PLATFORM_DEVID_AUTO);
        if (!fme_br->br)
                return ERR_PTR(ret);

        fme_br->br->dev.parent = dev;

        ret = platform_device_add_data(fme_br->br, &br_pdata, sizeof(br_pdata));
        if (ret)
                goto create_br_err;

        ret = platform_device_add(fme_br->br);
        if (ret)
                goto create_br_err;

        return fme_br;

create_br_err:
        platform_device_put(fme_br->br);
        return ERR_PTR(ret);
}

/**
 * dfl_fme_destroy_bridge - destroy fpga bridge platform device
 * @fme_br: fme bridge to destroy
 */
static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br)
{
        platform_device_unregister(fme_br->br);
}

/**
 * dfl_fme_destroy_bridges - destroy all fpga bridge platform devices
 * @pdata: fme platform device's pdata
 */
static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
{
        struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
        struct dfl_fme_bridge *fbridge, *tmp;

        list_for_each_entry_safe(fbridge, tmp, &priv->bridge_list, node) {
                list_del(&fbridge->node);
                dfl_fme_destroy_bridge(fbridge);
        }
}

/**
 * dfl_fme_create_region - create fpga region platform device as child
 *
 * @pdata: fme platform device's pdata
 * @mgr: mgr platform device needed for region
 * @br: br platform device needed for region
 * @port_id: port id
 *
 * Return: fme region if successful, and error code otherwise.
 */
static struct dfl_fme_region *
dfl_fme_create_region(struct dfl_feature_platform_data *pdata,
                      struct platform_device *mgr,
                      struct platform_device *br, int port_id)
{
        struct dfl_fme_region_pdata region_pdata;
        struct device *dev = &pdata->dev->dev;
        struct dfl_fme_region *fme_region;
        int ret = -ENOMEM;

        fme_region = devm_kzalloc(dev, sizeof(*fme_region), GFP_KERNEL);
        if (!fme_region)
                return ERR_PTR(ret);

        region_pdata.mgr = mgr;
        region_pdata.br = br;

        /*
         * Each FPGA device may have more than one port, so allocate platform
         * device using the same port platform device id.
         */
        fme_region->region = platform_device_alloc(DFL_FPGA_FME_REGION, br->id);
        if (!fme_region->region)
                return ERR_PTR(ret);

        fme_region->region->dev.parent = dev;

        ret = platform_device_add_data(fme_region->region, &region_pdata,
                                       sizeof(region_pdata));
        if (ret)
                goto create_region_err;

        ret = platform_device_add(fme_region->region);
        if (ret)
                goto create_region_err;

        fme_region->port_id = port_id;

        return fme_region;

create_region_err:
        platform_device_put(fme_region->region);
        return ERR_PTR(ret);
}

/**
 * dfl_fme_destroy_region - destroy fme region
 * @fme_region: fme region to destroy
 */
static void dfl_fme_destroy_region(struct dfl_fme_region *fme_region)
{
        platform_device_unregister(fme_region->region);
}

/**
 * dfl_fme_destroy_regions - destroy all fme regions
 * @pdata: fme platform device's pdata
 */
static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
{
        struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
        struct dfl_fme_region *fme_region, *tmp;

        list_for_each_entry_safe(fme_region, tmp, &priv->region_list, node) {
                list_del(&fme_region->node);
                dfl_fme_destroy_region(fme_region);
        }
}

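/*
 * Set up partial reconfiguration management: create one fpga manager for the
 * FME and, for each implemented port, an fpga bridge plus an fpga region that
 * links that bridge to the manager.
 */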
static int pr_mgmt_init(struct platform_device *pdev,
                        struct dfl_feature *feature)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct dfl_fme_region *fme_region;
        struct dfl_fme_bridge *fme_br;
        struct platform_device *mgr;
        struct dfl_fme *priv;
        void __iomem *fme_hdr;
        int ret = -ENODEV, i = 0;
        u64 fme_cap, port_offset;

        fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
                                               FME_FEATURE_ID_HEADER);

        mutex_lock(&pdata->lock);
        priv = dfl_fpga_pdata_get_private(pdata);

        /* Initialize the region and bridge sub device list */
        INIT_LIST_HEAD(&priv->region_list);
        INIT_LIST_HEAD(&priv->bridge_list);

        /* Create fpga mgr platform device */
        mgr = dfl_fme_create_mgr(pdata, feature);
        if (IS_ERR(mgr)) {
                dev_err(&pdev->dev, "fail to create fpga mgr pdev\n");
                goto unlock;
        }

        priv->mgr = mgr;

        /* Read capability register to check number of regions and bridges */
        fme_cap = readq(fme_hdr + FME_HDR_CAP);
        for (; i < FIELD_GET(FME_CAP_NUM_PORTS, fme_cap); i++) {
                port_offset = readq(fme_hdr + FME_HDR_PORT_OFST(i));
                if (!(port_offset & FME_PORT_OFST_IMP))
                        continue;

                /* Create bridge for each port */
                fme_br = dfl_fme_create_bridge(pdata, i);
                if (IS_ERR(fme_br)) {
                        ret = PTR_ERR(fme_br);
                        goto destroy_region;
                }

                list_add(&fme_br->node, &priv->bridge_list);

                /* Create region for each port */
                fme_region = dfl_fme_create_region(pdata, mgr,
                                                   fme_br->br, i);
                if (IS_ERR(fme_region)) {
                        ret = PTR_ERR(fme_region);
                        goto destroy_region;
                }

                list_add(&fme_region->node, &priv->region_list);
        }
        mutex_unlock(&pdata->lock);

        return 0;

destroy_region:
        dfl_fme_destroy_regions(pdata);
        dfl_fme_destroy_bridges(pdata);
        dfl_fme_destroy_mgr(pdata);
unlock:
        mutex_unlock(&pdata->lock);
        return ret;
}

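/* Tear down everything created by pr_mgmt_init(): regions, bridges and mgr. */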
static void pr_mgmt_uinit(struct platform_device *pdev,
                          struct dfl_feature *feature)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

        mutex_lock(&pdata->lock);

        dfl_fme_destroy_regions(pdata);
        dfl_fme_destroy_bridges(pdata);
        dfl_fme_destroy_mgr(pdata);
        mutex_unlock(&pdata->lock);
}

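/*
 * ioctl dispatcher for the PR_MGMT sub feature. Only DFL_FPGA_FME_PORT_PR
 * is handled here.
 */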
static long fme_pr_ioctl(struct platform_device *pdev,
                         struct dfl_feature *feature,
                         unsigned int cmd, unsigned long arg)
{
        long ret;

        switch (cmd) {
        case DFL_FPGA_FME_PORT_PR:
                ret = fme_pr(pdev, arg);
                break;
        default:
                ret = -ENODEV;
        }

        return ret;
}

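/*
 * Feature id table and ops exported for the FME PR_MGMT private feature;
 * the DFL FME driver core matches the feature id and hooks up these ops.
 */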
const struct dfl_feature_id fme_pr_mgmt_id_table[] = {
        {.id = FME_FEATURE_ID_PR_MGMT,},
        {0}
};

const struct dfl_feature_ops fme_pr_mgmt_ops = {
        .init = pr_mgmt_init,
        .uinit = pr_mgmt_uinit,
        .ioctl = fme_pr_ioctl,
};