linux/drivers/fpga/dfl-afu-main.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Accelerated Function Unit (AFU)
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Wu Hao <hao.wu@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Henry Mitchel <henry.mitchel@intel.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/fpga-dfl.h>

#include "dfl-afu.h"

/**
 * __afu_port_enable - enable a port by clearing reset
 * @pdev: port platform device.
 *
 * Enable the port by clearing the port soft reset bit, which is set by
 * default. The AFU is unable to respond to any MMIO access while its port is
 * in reset. __afu_port_enable() should only be used after
 * __afu_port_disable().
 *
 * The caller needs to hold the lock for protection.
 */
void __afu_port_enable(struct platform_device *pdev)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
        void __iomem *base;
        u64 v;

        WARN_ON(!pdata->disable_count);

        if (--pdata->disable_count != 0)
                return;

        base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

        /* Clear port soft reset */
        v = readq(base + PORT_HDR_CTRL);
        v &= ~PORT_CTRL_SFTRST;
        writeq(v, base + PORT_HDR_CTRL);
}

#define RST_POLL_INVL 10 /* us */
#define RST_POLL_TIMEOUT 1000 /* us */

/**
 * __afu_port_disable - disable a port by holding it in reset
 * @pdev: port platform device.
 *
 * Disable the port by setting the port soft reset bit, which puts the port
 * into reset.
 *
 * The caller needs to hold the lock for protection.
 *
 * Return: 0 on success, -ETIMEDOUT if the hardware does not acknowledge the
 * reset in time.
 */
int __afu_port_disable(struct platform_device *pdev)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
        void __iomem *base;
        u64 v;

        if (pdata->disable_count++ != 0)
                return 0;

        base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

        /* Set port soft reset */
        v = readq(base + PORT_HDR_CTRL);
        v |= PORT_CTRL_SFTRST;
        writeq(v, base + PORT_HDR_CTRL);

        /*
         * The hardware sets the ack bit to 1 when all outstanding requests
         * have been drained on this port and the minimum soft reset pulse
         * width has elapsed. The driver polls the port_soft_reset_ack bit to
         * determine whether the hardware has completed the reset.
         */
        if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
                               v & PORT_CTRL_SFTRST_ACK,
                               RST_POLL_INVL, RST_POLL_TIMEOUT)) {
                dev_err(&pdev->dev, "timeout, fail to reset device\n");
                return -ETIMEDOUT;
        }

        return 0;
}

/*
 * This function resets the FPGA Port and its accelerator (AFU) via
 * __afu_port_disable() and __afu_port_enable() (setting the port soft reset
 * bit and then clearing it). Userspace can trigger a Port reset at any time,
 * e.g. during DMA or Partial Reconfiguration. This should never cause any
 * system-level issue, only a functional failure (e.g. a failed DMA or PR
 * operation) that is recoverable.
 *
 * Note: the accelerator (AFU) is not accessible while its port is in reset
 * (disabled). Any MMIO access to the AFU while it is in reset results in
 * errors reported via the port error reporting sub-feature (if present).
 */
static int __port_reset(struct platform_device *pdev)
{
        int ret;

        ret = __afu_port_disable(pdev);
        if (!ret)
                __afu_port_enable(pdev);

        return ret;
}

static int port_reset(struct platform_device *pdev)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
        int ret;

        mutex_lock(&pdata->lock);
        ret = __port_reset(pdev);
        mutex_unlock(&pdata->lock);

        return ret;
}

static int port_get_id(struct platform_device *pdev)
{
        void __iomem *base;

        base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

        return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
}

static ssize_t
id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        int id = port_get_id(to_platform_device(dev));

        return scnprintf(buf, PAGE_SIZE, "%d\n", id);
}
static DEVICE_ATTR_RO(id);

static ssize_t
ltr_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
        void __iomem *base;
        u64 v;

        base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

        mutex_lock(&pdata->lock);
        v = readq(base + PORT_HDR_CTRL);
        mutex_unlock(&pdata->lock);

        return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v));
}

static ssize_t
ltr_store(struct device *dev, struct device_attribute *attr,
          const char *buf, size_t count)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
        void __iomem *base;
        bool ltr;
        u64 v;

        if (kstrtobool(buf, &ltr))
                return -EINVAL;

        base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

        mutex_lock(&pdata->lock);
        v = readq(base + PORT_HDR_CTRL);
        v &= ~PORT_CTRL_LATENCY;
        v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 1 : 0);
        writeq(v, base + PORT_HDR_CTRL);
        mutex_unlock(&pdata->lock);

        return count;
}
static DEVICE_ATTR_RW(ltr);

static ssize_t
ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
        void __iomem *base;
        u64 v;

        base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

        mutex_lock(&pdata->lock);
        v = readq(base + PORT_HDR_STS);
        mutex_unlock(&pdata->lock);

        return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v));
}

static ssize_t
ap1_event_store(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
        void __iomem *base;
        bool clear;

        if (kstrtobool(buf, &clear) || !clear)
                return -EINVAL;

        base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

        mutex_lock(&pdata->lock);
        writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS);
        mutex_unlock(&pdata->lock);

        return count;
}
static DEVICE_ATTR_RW(ap1_event);

static ssize_t
ap2_event_show(struct device *dev, struct device_attribute *attr,
               char *buf)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
        void __iomem *base;
        u64 v;

        base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

        mutex_lock(&pdata->lock);
        v = readq(base + PORT_HDR_STS);
        mutex_unlock(&pdata->lock);

        return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v));
}

static ssize_t
ap2_event_store(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
        void __iomem *base;
        bool clear;

        if (kstrtobool(buf, &clear) || !clear)
                return -EINVAL;

        base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

        mutex_lock(&pdata->lock);
        writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS);
        mutex_unlock(&pdata->lock);

        return count;
}
static DEVICE_ATTR_RW(ap2_event);

static ssize_t
power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
        void __iomem *base;
        u64 v;

        base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

        mutex_lock(&pdata->lock);
        v = readq(base + PORT_HDR_STS);
        mutex_unlock(&pdata->lock);

        return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v));
}
static DEVICE_ATTR_RO(power_state);

static ssize_t
userclk_freqcmd_store(struct device *dev, struct device_attribute *attr,
                      const char *buf, size_t count)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
        u64 userclk_freq_cmd;
        void __iomem *base;

        if (kstrtou64(buf, 0, &userclk_freq_cmd))
                return -EINVAL;

        base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

        mutex_lock(&pdata->lock);
        writeq(userclk_freq_cmd, base + PORT_HDR_USRCLK_CMD0);
        mutex_unlock(&pdata->lock);

        return count;
}
static DEVICE_ATTR_WO(userclk_freqcmd);

static ssize_t
userclk_freqcntrcmd_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
        u64 userclk_freqcntr_cmd;
        void __iomem *base;

        if (kstrtou64(buf, 0, &userclk_freqcntr_cmd))
                return -EINVAL;

        base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

        mutex_lock(&pdata->lock);
        writeq(userclk_freqcntr_cmd, base + PORT_HDR_USRCLK_CMD1);
        mutex_unlock(&pdata->lock);

        return count;
}
static DEVICE_ATTR_WO(userclk_freqcntrcmd);

static ssize_t
userclk_freqsts_show(struct device *dev, struct device_attribute *attr,
                     char *buf)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
        u64 userclk_freqsts;
        void __iomem *base;

        base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

        mutex_lock(&pdata->lock);
        userclk_freqsts = readq(base + PORT_HDR_USRCLK_STS0);
        mutex_unlock(&pdata->lock);

        return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqsts);
}
static DEVICE_ATTR_RO(userclk_freqsts);

static ssize_t
userclk_freqcntrsts_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
        u64 userclk_freqcntrsts;
        void __iomem *base;

        base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

        mutex_lock(&pdata->lock);
        userclk_freqcntrsts = readq(base + PORT_HDR_USRCLK_STS1);
        mutex_unlock(&pdata->lock);

        return sprintf(buf, "0x%llx\n",
                       (unsigned long long)userclk_freqcntrsts);
}
static DEVICE_ATTR_RO(userclk_freqcntrsts);

static struct attribute *port_hdr_attrs[] = {
        &dev_attr_id.attr,
        &dev_attr_ltr.attr,
        &dev_attr_ap1_event.attr,
        &dev_attr_ap2_event.attr,
        &dev_attr_power_state.attr,
        &dev_attr_userclk_freqcmd.attr,
        &dev_attr_userclk_freqcntrcmd.attr,
        &dev_attr_userclk_freqsts.attr,
        &dev_attr_userclk_freqcntrsts.attr,
        NULL,
};

static umode_t port_hdr_attrs_visible(struct kobject *kobj,
                                      struct attribute *attr, int n)
{
        struct device *dev = kobj_to_dev(kobj);
        umode_t mode = attr->mode;
        void __iomem *base;

        base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

        if (dfl_feature_revision(base) > 0) {
                /*
                 * The userclk sysfs interfaces are only visible when the
                 * port revision is 0; hardware with revision >0 does not
                 * support them.
                 */
                if (attr == &dev_attr_userclk_freqcmd.attr ||
                    attr == &dev_attr_userclk_freqcntrcmd.attr ||
                    attr == &dev_attr_userclk_freqsts.attr ||
                    attr == &dev_attr_userclk_freqcntrsts.attr)
                        mode = 0;
        }

        return mode;
}

static const struct attribute_group port_hdr_group = {
        .attrs      = port_hdr_attrs,
        .is_visible = port_hdr_attrs_visible,
};
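
/*
 * Illustrative usage note (not part of the upstream file): once the DFL bus
 * enumerates the port, the attributes above appear under the port platform
 * device in sysfs. A minimal sketch, assuming the device is named dfl-port.0
 * (the exact path depends on how the device is enumerated on a given system):
 *
 *   cat /sys/bus/platform/devices/dfl-port.0/id            # port number
 *   cat /sys/bus/platform/devices/dfl-port.0/power_state   # 0x<state>
 *   echo 1 > /sys/bus/platform/devices/dfl-port.0/ap1_event  # clear AP1 event
 *   echo 1 > /sys/bus/platform/devices/dfl-port.0/ltr        # enable latency tolerance
 */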

static int port_hdr_init(struct platform_device *pdev,
                         struct dfl_feature *feature)
{
        port_reset(pdev);

        return 0;
}

static long
port_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
               unsigned int cmd, unsigned long arg)
{
        long ret;

        switch (cmd) {
        case DFL_FPGA_PORT_RESET:
                if (!arg)
                        ret = port_reset(pdev);
                else
                        ret = -EINVAL;
                break;
        default:
                dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
                ret = -ENODEV;
        }

        return ret;
}

static const struct dfl_feature_id port_hdr_id_table[] = {
        {.id = PORT_FEATURE_ID_HEADER,},
        {0,}
};

static const struct dfl_feature_ops port_hdr_ops = {
        .init = port_hdr_init,
        .ioctl = port_hdr_ioctl,
};

static ssize_t
afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
        void __iomem *base;
        u64 guidl, guidh;

        base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU);

        mutex_lock(&pdata->lock);
        if (pdata->disable_count) {
                mutex_unlock(&pdata->lock);
                return -EBUSY;
        }

        guidl = readq(base + GUID_L);
        guidh = readq(base + GUID_H);
        mutex_unlock(&pdata->lock);

        return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
}
static DEVICE_ATTR_RO(afu_id);

static struct attribute *port_afu_attrs[] = {
        &dev_attr_afu_id.attr,
        NULL
};

static umode_t port_afu_attrs_visible(struct kobject *kobj,
                                      struct attribute *attr, int n)
{
        struct device *dev = kobj_to_dev(kobj);

        /*
         * sysfs entries are visible only if the related private feature is
         * enumerated.
         */
        if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_AFU))
                return 0;

        return attr->mode;
}

static const struct attribute_group port_afu_group = {
        .attrs      = port_afu_attrs,
        .is_visible = port_afu_attrs_visible,
};

static int port_afu_init(struct platform_device *pdev,
                         struct dfl_feature *feature)
{
        struct resource *res = &pdev->resource[feature->resource_index];

        return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
                                   DFL_PORT_REGION_INDEX_AFU,
                                   resource_size(res), res->start,
                                   DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
                                   DFL_PORT_REGION_WRITE);
}

static const struct dfl_feature_id port_afu_id_table[] = {
        {.id = PORT_FEATURE_ID_AFU,},
        {0,}
};

static const struct dfl_feature_ops port_afu_ops = {
        .init = port_afu_init,
};

static int port_stp_init(struct platform_device *pdev,
                         struct dfl_feature *feature)
{
        struct resource *res = &pdev->resource[feature->resource_index];

        return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
                                   DFL_PORT_REGION_INDEX_STP,
                                   resource_size(res), res->start,
                                   DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
                                   DFL_PORT_REGION_WRITE);
}

static const struct dfl_feature_id port_stp_id_table[] = {
        {.id = PORT_FEATURE_ID_STP,},
        {0,}
};

static const struct dfl_feature_ops port_stp_ops = {
        .init = port_stp_init,
};

static long
port_uint_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
                unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case DFL_FPGA_PORT_UINT_GET_IRQ_NUM:
                return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
        case DFL_FPGA_PORT_UINT_SET_IRQ:
                return dfl_feature_ioctl_set_irq(pdev, feature, arg);
        default:
                dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
                return -ENODEV;
        }
}

static const struct dfl_feature_id port_uint_id_table[] = {
        {.id = PORT_FEATURE_ID_UINT,},
        {0,}
};

static const struct dfl_feature_ops port_uint_ops = {
        .ioctl = port_uint_ioctl,
};

static struct dfl_feature_driver port_feature_drvs[] = {
        {
                .id_table = port_hdr_id_table,
                .ops = &port_hdr_ops,
        },
        {
                .id_table = port_afu_id_table,
                .ops = &port_afu_ops,
        },
        {
                .id_table = port_err_id_table,
                .ops = &port_err_ops,
        },
        {
                .id_table = port_stp_id_table,
                .ops = &port_stp_ops,
        },
        {
                .id_table = port_uint_id_table,
                .ops = &port_uint_ops,
        },
        {
                .ops = NULL,
        }
};

static int afu_open(struct inode *inode, struct file *filp)
{
        struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
        struct dfl_feature_platform_data *pdata;
        int ret;

        pdata = dev_get_platdata(&fdev->dev);
        if (WARN_ON(!pdata))
                return -ENODEV;

        mutex_lock(&pdata->lock);
        ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
        if (!ret) {
                dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
                        dfl_feature_dev_use_count(pdata));
                filp->private_data = fdev;
        }
        mutex_unlock(&pdata->lock);

        return ret;
}

static int afu_release(struct inode *inode, struct file *filp)
{
        struct platform_device *pdev = filp->private_data;
        struct dfl_feature_platform_data *pdata;
        struct dfl_feature *feature;

        dev_dbg(&pdev->dev, "Device File Release\n");

        pdata = dev_get_platdata(&pdev->dev);

        mutex_lock(&pdata->lock);
        dfl_feature_dev_use_end(pdata);

        if (!dfl_feature_dev_use_count(pdata)) {
                dfl_fpga_dev_for_each_feature(pdata, feature)
                        dfl_fpga_set_irq_triggers(feature, 0,
                                                  feature->nr_irqs, NULL);
                __port_reset(pdev);
                afu_dma_region_destroy(pdata);
        }
        mutex_unlock(&pdata->lock);

        return 0;
}

static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
                                      unsigned long arg)
{
        /* No extension support for now */
        return 0;
}

static long
afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
{
        struct dfl_fpga_port_info info;
        struct dfl_afu *afu;
        unsigned long minsz;

        minsz = offsetofend(struct dfl_fpga_port_info, num_umsgs);

        if (copy_from_user(&info, arg, minsz))
                return -EFAULT;

        if (info.argsz < minsz)
                return -EINVAL;

        mutex_lock(&pdata->lock);
        afu = dfl_fpga_pdata_get_private(pdata);
        info.flags = 0;
        info.num_regions = afu->num_regions;
        info.num_umsgs = afu->num_umsgs;
        mutex_unlock(&pdata->lock);

        if (copy_to_user(arg, &info, sizeof(info)))
                return -EFAULT;

        return 0;
}

static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
                                      void __user *arg)
{
        struct dfl_fpga_port_region_info rinfo;
        struct dfl_afu_mmio_region region;
        unsigned long minsz;
        long ret;

        minsz = offsetofend(struct dfl_fpga_port_region_info, offset);

        if (copy_from_user(&rinfo, arg, minsz))
                return -EFAULT;

        if (rinfo.argsz < minsz || rinfo.padding)
                return -EINVAL;

        ret = afu_mmio_region_get_by_index(pdata, rinfo.index, &region);
        if (ret)
                return ret;

        rinfo.flags = region.flags;
        rinfo.size = region.size;
        rinfo.offset = region.offset;

        if (copy_to_user(arg, &rinfo, sizeof(rinfo)))
                return -EFAULT;

        return 0;
}

static long
afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
{
        struct dfl_fpga_port_dma_map map;
        unsigned long minsz;
        long ret;

        minsz = offsetofend(struct dfl_fpga_port_dma_map, iova);

        if (copy_from_user(&map, arg, minsz))
                return -EFAULT;

        if (map.argsz < minsz || map.flags)
                return -EINVAL;

        ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
        if (ret)
                return ret;

        if (copy_to_user(arg, &map, sizeof(map))) {
                afu_dma_unmap_region(pdata, map.iova);
                return -EFAULT;
        }

        dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
                (unsigned long long)map.user_addr,
                (unsigned long long)map.length,
                (unsigned long long)map.iova);

        return 0;
}

static long
afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
{
        struct dfl_fpga_port_dma_unmap unmap;
        unsigned long minsz;

        minsz = offsetofend(struct dfl_fpga_port_dma_unmap, iova);

        if (copy_from_user(&unmap, arg, minsz))
                return -EFAULT;

        if (unmap.argsz < minsz || unmap.flags)
                return -EINVAL;

        return afu_dma_unmap_region(pdata, unmap.iova);
}
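
/*
 * Illustrative userspace sketch (not part of the upstream file): mapping and
 * unmapping a DMA buffer through the ioctls handled above, assuming the UAPI
 * definitions from <linux/fpga-dfl.h> and a port device node such as
 * /dev/dfl-port.0. Error handling is omitted for brevity.
 *
 *   #include <linux/fpga-dfl.h>
 *   #include <sys/ioctl.h>
 *   #include <sys/mman.h>
 *   #include <stdint.h>
 *   #include <fcntl.h>
 *
 *   int fd = open("/dev/dfl-port.0", O_RDWR);
 *   size_t len = 2 * 1024 * 1024;
 *   // The driver expects a page-aligned buffer address and length.
 *   void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *   struct dfl_fpga_port_dma_map map = {
 *           .argsz = sizeof(map),
 *           .user_addr = (__u64)(uintptr_t)buf,
 *           .length = len,
 *   };
 *   ioctl(fd, DFL_FPGA_PORT_DMA_MAP, &map);   // map.iova now holds the IOVA
 *
 *   struct dfl_fpga_port_dma_unmap unmap = {
 *           .argsz = sizeof(unmap),
 *           .iova = map.iova,
 *   };
 *   ioctl(fd, DFL_FPGA_PORT_DMA_UNMAP, &unmap);
 */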

static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct platform_device *pdev = filp->private_data;
        struct dfl_feature_platform_data *pdata;
        struct dfl_feature *f;
        long ret;

        dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);

        pdata = dev_get_platdata(&pdev->dev);

        switch (cmd) {
        case DFL_FPGA_GET_API_VERSION:
                return DFL_FPGA_API_VERSION;
        case DFL_FPGA_CHECK_EXTENSION:
                return afu_ioctl_check_extension(pdata, arg);
        case DFL_FPGA_PORT_GET_INFO:
                return afu_ioctl_get_info(pdata, (void __user *)arg);
        case DFL_FPGA_PORT_GET_REGION_INFO:
                return afu_ioctl_get_region_info(pdata, (void __user *)arg);
        case DFL_FPGA_PORT_DMA_MAP:
                return afu_ioctl_dma_map(pdata, (void __user *)arg);
        case DFL_FPGA_PORT_DMA_UNMAP:
                return afu_ioctl_dma_unmap(pdata, (void __user *)arg);
        default:
                /*
                 * Let the sub-feature's ioctl function handle the cmd. The
                 * sub-feature's ioctl returns -ENODEV when the cmd is not
                 * handled by that sub-feature, and returns 0 or another
                 * error code when the cmd is handled.
                 */
                dfl_fpga_dev_for_each_feature(pdata, f)
                        if (f->ops && f->ops->ioctl) {
                                ret = f->ops->ioctl(pdev, f, cmd, arg);
                                if (ret != -ENODEV)
                                        return ret;
                        }
        }

        return -EINVAL;
}

static const struct vm_operations_struct afu_vma_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys,
#endif
};

static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct platform_device *pdev = filp->private_data;
        struct dfl_feature_platform_data *pdata;
        u64 size = vma->vm_end - vma->vm_start;
        struct dfl_afu_mmio_region region;
        u64 offset;
        int ret;

        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        pdata = dev_get_platdata(&pdev->dev);

        offset = vma->vm_pgoff << PAGE_SHIFT;
        ret = afu_mmio_region_get_by_offset(pdata, offset, size, &region);
        if (ret)
                return ret;

        if (!(region.flags & DFL_PORT_REGION_MMAP))
                return -EINVAL;

        if ((vma->vm_flags & VM_READ) && !(region.flags & DFL_PORT_REGION_READ))
                return -EPERM;

        if ((vma->vm_flags & VM_WRITE) &&
            !(region.flags & DFL_PORT_REGION_WRITE))
                return -EPERM;

        /* Support debug access to the mapping */
        vma->vm_ops = &afu_vma_ops;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        return remap_pfn_range(vma, vma->vm_start,
                        (region.phys + (offset - region.offset)) >> PAGE_SHIFT,
                        size, vma->vm_page_prot);
}
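
/*
 * Illustrative userspace sketch (not part of the upstream file): querying the
 * AFU MMIO region and mapping it through the ioctl and mmap handlers above,
 * assuming the UAPI definitions from <linux/fpga-dfl.h> and the same fd as in
 * the DMA example; error handling is omitted.
 *
 *   struct dfl_fpga_port_region_info rinfo = {
 *           .argsz = sizeof(rinfo),
 *           .index = DFL_PORT_REGION_INDEX_AFU,
 *   };
 *   ioctl(fd, DFL_FPGA_PORT_GET_REGION_INFO, &rinfo);
 *
 *   // rinfo.offset is the file offset to pass to mmap(), rinfo.size the
 *   // length; the mapping must be MAP_SHARED, as checked in afu_mmap().
 *   void *mmio = mmap(NULL, rinfo.size, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd, rinfo.offset);
 */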

static const struct file_operations afu_fops = {
        .owner = THIS_MODULE,
        .open = afu_open,
        .release = afu_release,
        .unlocked_ioctl = afu_ioctl,
        .mmap = afu_mmap,
};

static int afu_dev_init(struct platform_device *pdev)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct dfl_afu *afu;

        afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
        if (!afu)
                return -ENOMEM;

        afu->pdata = pdata;

        mutex_lock(&pdata->lock);
        dfl_fpga_pdata_set_private(pdata, afu);
        afu_mmio_region_init(pdata);
        afu_dma_region_init(pdata);
        mutex_unlock(&pdata->lock);

        return 0;
}

static int afu_dev_destroy(struct platform_device *pdev)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

        mutex_lock(&pdata->lock);
        afu_mmio_region_destroy(pdata);
        afu_dma_region_destroy(pdata);
        dfl_fpga_pdata_set_private(pdata, NULL);
        mutex_unlock(&pdata->lock);

        return 0;
}

static int port_enable_set(struct platform_device *pdev, bool enable)
{
        struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
        int ret = 0;

        mutex_lock(&pdata->lock);
        if (enable)
                __afu_port_enable(pdev);
        else
                ret = __afu_port_disable(pdev);
        mutex_unlock(&pdata->lock);

        return ret;
}

static struct dfl_fpga_port_ops afu_port_ops = {
        .name = DFL_FPGA_FEATURE_DEV_PORT,
        .owner = THIS_MODULE,
        .get_id = port_get_id,
        .enable_set = port_enable_set,
};

static int afu_probe(struct platform_device *pdev)
{
        int ret;

        dev_dbg(&pdev->dev, "%s\n", __func__);

        ret = afu_dev_init(pdev);
        if (ret)
                goto exit;

        ret = dfl_fpga_dev_feature_init(pdev, port_feature_drvs);
        if (ret)
                goto dev_destroy;

        ret = dfl_fpga_dev_ops_register(pdev, &afu_fops, THIS_MODULE);
        if (ret) {
                dfl_fpga_dev_feature_uinit(pdev);
                goto dev_destroy;
        }

        return 0;

dev_destroy:
        afu_dev_destroy(pdev);
exit:
        return ret;
}

static int afu_remove(struct platform_device *pdev)
{
        dev_dbg(&pdev->dev, "%s\n", __func__);

        dfl_fpga_dev_ops_unregister(pdev);
        dfl_fpga_dev_feature_uinit(pdev);
        afu_dev_destroy(pdev);

        return 0;
}

static const struct attribute_group *afu_dev_groups[] = {
        &port_hdr_group,
        &port_afu_group,
        &port_err_group,
        NULL
};

static struct platform_driver afu_driver = {
        .driver = {
                .name       = DFL_FPGA_FEATURE_DEV_PORT,
                .dev_groups = afu_dev_groups,
        },
        .probe   = afu_probe,
        .remove  = afu_remove,
};

static int __init afu_init(void)
{
        int ret;

        dfl_fpga_port_ops_add(&afu_port_ops);

        ret = platform_driver_register(&afu_driver);
        if (ret)
                dfl_fpga_port_ops_del(&afu_port_ops);

        return ret;
}

static void __exit afu_exit(void)
{
        platform_driver_unregister(&afu_driver);

        dfl_fpga_port_ops_del(&afu_port_ops);
}

module_init(afu_init);
module_exit(afu_exit);

MODULE_DESCRIPTION("FPGA Accelerated Function Unit driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dfl-port");