/* linux/drivers/staging/gasket/gasket_core.c */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Gasket generic driver framework. This file contains the implementation
   4 * for the Gasket generic driver framework - the functionality that is common
   5 * across Gasket devices.
   6 *
   7 * Copyright (C) 2018 Google, Inc.
   8 */
   9
  10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  11
  12#include "gasket_core.h"
  13
  14#include "gasket_interrupt.h"
  15#include "gasket_ioctl.h"
  16#include "gasket_page_table.h"
  17#include "gasket_sysfs.h"
  18
  19#include <linux/capability.h>
  20#include <linux/compiler.h>
  21#include <linux/delay.h>
  22#include <linux/device.h>
  23#include <linux/fs.h>
  24#include <linux/init.h>
  25#include <linux/of.h>
  26#include <linux/pid_namespace.h>
  27#include <linux/printk.h>
  28#include <linux/sched.h>
  29
  30#ifdef GASKET_KERNEL_TRACE_SUPPORT
  31#define CREATE_TRACE_POINTS
  32#include <trace/events/gasket_mmap.h>
  33#else
  34#define trace_gasket_mmap_exit(x)
  35#define trace_gasket_mmap_entry(x, ...)
  36#endif
  37
/*
 * "Private" members of gasket_driver_desc.
 *
 * Contains internal per-device type tracking data, i.e., data not appropriate
 * as part of the public interface for the generic framework. One instance
 * exists per registered driver description (see g_descs below).
 */
struct gasket_internal_desc {
	/* Device-specific-driver-provided configuration information. */
	const struct gasket_driver_desc *driver_desc;

	/* Protects access to per-driver data (i.e. this structure),
	 * notably the devs[] slot table.
	 */
	struct mutex mutex;

	/* Kernel-internal device class. */
	struct class *class;

	/* Instantiated / present devices of this type, indexed by dev_idx. */
	struct gasket_dev *devs[GASKET_DEV_MAX];
};
  57
/*
 * do_map_region() needs be able to return more than just true/false:
 * a failed attempt must be distinguishable from a request that was never
 * mappable in the first place.
 */
enum do_map_region_status {
	/* The region was successfully mapped. */
	DO_MAP_REGION_SUCCESS,

	/* Attempted to map region and failed. */
	DO_MAP_REGION_FAILURE,

	/* The requested region to map was not part of a mappable region. */
	DO_MAP_REGION_INVALID,
};
  69
/* Global data definitions. */
/* Mutex - only for framework-wide data (e.g. the g_descs table). Other data
 * should be protected by finer-grained locks.
 */
static DEFINE_MUTEX(g_mutex);

/* List of all registered device descriptions & their supporting data. */
static struct gasket_internal_desc g_descs[GASKET_FRAMEWORK_DESC_MAX];
  78
/* Mapping of statuses to human-readable strings for the sysfs "status"
 * attribute. Must end with {0,NULL} so gasket_num_name_lookup() can find
 * the terminator.
 */
static const struct gasket_num_name gasket_status_name_table[] = {
	{ GASKET_STATUS_DEAD, "DEAD" },
	{ GASKET_STATUS_ALIVE, "ALIVE" },
	{ GASKET_STATUS_LAMED, "LAMED" },
	{ GASKET_STATUS_DRIVER_EXIT, "DRIVER_EXITING" },
	{ 0, NULL },
};
  87
/* Enumeration of the automatic Gasket framework sysfs nodes. Each value is
 * stored in a gasket_sysfs_attribute's attr_type and dispatched on in
 * gasket_sysfs_data_show().
 */
enum gasket_sysfs_attribute_type {
	ATTR_BAR_OFFSETS,
	ATTR_BAR_SIZES,
	ATTR_DRIVER_VERSION,
	ATTR_FRAMEWORK_VERSION,
	ATTR_DEVICE_TYPE,
	ATTR_HARDWARE_REVISION,
	ATTR_PCI_ADDRESS,
	ATTR_STATUS,
	ATTR_IS_DEVICE_OWNED,
	ATTR_DEVICE_OWNER,
	ATTR_WRITE_OPEN_COUNT,
	ATTR_RESET_COUNT,
	ATTR_USER_MEM_RANGES
};
 104
 105/* Perform a standard Gasket callback. */
 106static inline int
 107check_and_invoke_callback(struct gasket_dev *gasket_dev,
 108                          int (*cb_function)(struct gasket_dev *))
 109{
 110        int ret = 0;
 111
 112        if (cb_function) {
 113                mutex_lock(&gasket_dev->mutex);
 114                ret = cb_function(gasket_dev);
 115                mutex_unlock(&gasket_dev->mutex);
 116        }
 117        return ret;
 118}
 119
 120/* Perform a standard Gasket callback without grabbing gasket_dev->mutex. */
 121static inline int
 122gasket_check_and_invoke_callback_nolock(struct gasket_dev *gasket_dev,
 123                                        int (*cb_function)(struct gasket_dev *))
 124{
 125        int ret = 0;
 126
 127        if (cb_function)
 128                ret = cb_function(gasket_dev);
 129        return ret;
 130}
 131
 132/*
 133 * Return nonzero if the gasket_cdev_info is owned by the current thread group
 134 * ID.
 135 */
 136static int gasket_owned_by_current_tgid(struct gasket_cdev_info *info)
 137{
 138        return (info->ownership.is_owned &&
 139                (info->ownership.owner == current->tgid));
 140}
 141
 142/*
 143 * Find the next free gasket_internal_dev slot.
 144 *
 145 * Returns the located slot number on success or a negative number on failure.
 146 */
 147static int gasket_find_dev_slot(struct gasket_internal_desc *internal_desc,
 148                                const char *kobj_name)
 149{
 150        int i;
 151
 152        mutex_lock(&internal_desc->mutex);
 153
 154        /* Search for a previous instance of this device. */
 155        for (i = 0; i < GASKET_DEV_MAX; i++) {
 156                if (internal_desc->devs[i] &&
 157                    strcmp(internal_desc->devs[i]->kobj_name, kobj_name) == 0) {
 158                        pr_err("Duplicate device %s\n", kobj_name);
 159                        mutex_unlock(&internal_desc->mutex);
 160                        return -EBUSY;
 161                }
 162        }
 163
 164        /* Find a free device slot. */
 165        for (i = 0; i < GASKET_DEV_MAX; i++) {
 166                if (!internal_desc->devs[i])
 167                        break;
 168        }
 169
 170        if (i == GASKET_DEV_MAX) {
 171                pr_err("Too many registered devices; max %d\n", GASKET_DEV_MAX);
 172                mutex_unlock(&internal_desc->mutex);
 173                return -EBUSY;
 174        }
 175
 176        mutex_unlock(&internal_desc->mutex);
 177        return i;
 178}
 179
 180/*
 181 * Allocate and initialize a Gasket device structure, add the device to the
 182 * device list.
 183 *
 184 * Returns 0 if successful, a negative error code otherwise.
 185 */
 186static int gasket_alloc_dev(struct gasket_internal_desc *internal_desc,
 187                            struct device *parent, struct gasket_dev **pdev)
 188{
 189        int dev_idx;
 190        const struct gasket_driver_desc *driver_desc =
 191                internal_desc->driver_desc;
 192        struct gasket_dev *gasket_dev;
 193        struct gasket_cdev_info *dev_info;
 194        const char *parent_name = dev_name(parent);
 195
 196        pr_debug("Allocating a Gasket device, parent %s.\n", parent_name);
 197
 198        *pdev = NULL;
 199
 200        dev_idx = gasket_find_dev_slot(internal_desc, parent_name);
 201        if (dev_idx < 0)
 202                return dev_idx;
 203
 204        gasket_dev = *pdev = kzalloc(sizeof(*gasket_dev), GFP_KERNEL);
 205        if (!gasket_dev) {
 206                pr_err("no memory for device, parent %s\n", parent_name);
 207                return -ENOMEM;
 208        }
 209        internal_desc->devs[dev_idx] = gasket_dev;
 210
 211        mutex_init(&gasket_dev->mutex);
 212
 213        gasket_dev->internal_desc = internal_desc;
 214        gasket_dev->dev_idx = dev_idx;
 215        snprintf(gasket_dev->kobj_name, GASKET_NAME_MAX, "%s", parent_name);
 216        gasket_dev->dev = get_device(parent);
 217        /* gasket_bar_data is uninitialized. */
 218        gasket_dev->num_page_tables = driver_desc->num_page_tables;
 219        /* max_page_table_size and *page table are uninit'ed */
 220        /* interrupt_data is not initialized. */
 221        /* status is 0, or GASKET_STATUS_DEAD */
 222
 223        dev_info = &gasket_dev->dev_info;
 224        snprintf(dev_info->name, GASKET_NAME_MAX, "%s_%u", driver_desc->name,
 225                 gasket_dev->dev_idx);
 226        dev_info->devt =
 227                MKDEV(driver_desc->major, driver_desc->minor +
 228                      gasket_dev->dev_idx);
 229        dev_info->device =
 230                device_create(internal_desc->class, parent, dev_info->devt,
 231                              gasket_dev, dev_info->name);
 232
 233        /* cdev has not yet been added; cdev_added is 0 */
 234        dev_info->gasket_dev_ptr = gasket_dev;
 235        /* ownership is all 0, indicating no owner or opens. */
 236
 237        return 0;
 238}
 239
 240/* Free a Gasket device. */
 241static void gasket_free_dev(struct gasket_dev *gasket_dev)
 242{
 243        struct gasket_internal_desc *internal_desc = gasket_dev->internal_desc;
 244
 245        mutex_lock(&internal_desc->mutex);
 246        internal_desc->devs[gasket_dev->dev_idx] = NULL;
 247        mutex_unlock(&internal_desc->mutex);
 248        put_device(gasket_dev->dev);
 249        kfree(gasket_dev);
 250}
 251
 252/*
 253 * Maps the specified bar into kernel space.
 254 *
 255 * Returns 0 on success, a negative error code otherwise.
 256 * A zero-sized BAR will not be mapped, but is not an error.
 257 */
 258static int gasket_map_pci_bar(struct gasket_dev *gasket_dev, int bar_num)
 259{
 260        struct gasket_internal_desc *internal_desc = gasket_dev->internal_desc;
 261        const struct gasket_driver_desc *driver_desc =
 262                internal_desc->driver_desc;
 263        ulong desc_bytes = driver_desc->bar_descriptions[bar_num].size;
 264        struct gasket_bar_data *data;
 265        int ret;
 266
 267        if (desc_bytes == 0)
 268                return 0;
 269
 270        if (driver_desc->bar_descriptions[bar_num].type != PCI_BAR) {
 271                /* not PCI: skip this entry */
 272                return 0;
 273        }
 274
 275        data = &gasket_dev->bar_data[bar_num];
 276
 277        /*
 278         * pci_resource_start and pci_resource_len return a "resource_size_t",
 279         * which is safely castable to ulong (which itself is the arg to
 280         * request_mem_region).
 281         */
 282        data->phys_base =
 283                (ulong)pci_resource_start(gasket_dev->pci_dev, bar_num);
 284        if (!data->phys_base) {
 285                dev_err(gasket_dev->dev, "Cannot get BAR%u base address\n",
 286                        bar_num);
 287                return -EINVAL;
 288        }
 289
 290        data->length_bytes =
 291                (ulong)pci_resource_len(gasket_dev->pci_dev, bar_num);
 292        if (data->length_bytes < desc_bytes) {
 293                dev_err(gasket_dev->dev,
 294                        "PCI BAR %u space is too small: %lu; expected >= %lu\n",
 295                        bar_num, data->length_bytes, desc_bytes);
 296                return -ENOMEM;
 297        }
 298
 299        if (!request_mem_region(data->phys_base, data->length_bytes,
 300                                gasket_dev->dev_info.name)) {
 301                dev_err(gasket_dev->dev,
 302                        "Cannot get BAR %d memory region %p\n",
 303                        bar_num, &gasket_dev->pci_dev->resource[bar_num]);
 304                return -EINVAL;
 305        }
 306
 307        data->virt_base = ioremap(data->phys_base, data->length_bytes);
 308        if (!data->virt_base) {
 309                dev_err(gasket_dev->dev,
 310                        "Cannot remap BAR %d memory region %p\n",
 311                        bar_num, &gasket_dev->pci_dev->resource[bar_num]);
 312                ret = -ENOMEM;
 313                goto fail;
 314        }
 315
 316        dma_set_mask(&gasket_dev->pci_dev->dev, DMA_BIT_MASK(64));
 317        dma_set_coherent_mask(&gasket_dev->pci_dev->dev, DMA_BIT_MASK(64));
 318
 319        return 0;
 320
 321fail:
 322        iounmap(data->virt_base);
 323        release_mem_region(data->phys_base, data->length_bytes);
 324        return ret;
 325}
 326
 327/*
 328 * Releases PCI BAR mapping.
 329 *
 330 * A zero-sized or not-mapped BAR will not be unmapped, but is not an error.
 331 */
 332static void gasket_unmap_pci_bar(struct gasket_dev *dev, int bar_num)
 333{
 334        ulong base, bytes;
 335        struct gasket_internal_desc *internal_desc = dev->internal_desc;
 336        const struct gasket_driver_desc *driver_desc =
 337                internal_desc->driver_desc;
 338
 339        if (driver_desc->bar_descriptions[bar_num].size == 0 ||
 340            !dev->bar_data[bar_num].virt_base)
 341                return;
 342
 343        if (driver_desc->bar_descriptions[bar_num].type != PCI_BAR)
 344                return;
 345
 346        iounmap(dev->bar_data[bar_num].virt_base);
 347        dev->bar_data[bar_num].virt_base = NULL;
 348
 349        base = pci_resource_start(dev->pci_dev, bar_num);
 350        if (!base) {
 351                dev_err(dev->dev, "cannot get PCI BAR%u base address\n",
 352                        bar_num);
 353                return;
 354        }
 355
 356        bytes = pci_resource_len(dev->pci_dev, bar_num);
 357        release_mem_region(base, bytes);
 358}
 359
 360/*
 361 * Setup PCI memory mapping for the specified device.
 362 *
 363 * Reads the BAR registers and sets up pointers to the device's memory mapped
 364 * IO space.
 365 *
 366 * Returns 0 on success and a negative value otherwise.
 367 */
 368static int gasket_setup_pci(struct pci_dev *pci_dev,
 369                            struct gasket_dev *gasket_dev)
 370{
 371        int i, mapped_bars, ret;
 372
 373        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
 374                ret = gasket_map_pci_bar(gasket_dev, i);
 375                if (ret) {
 376                        mapped_bars = i;
 377                        goto fail;
 378                }
 379        }
 380
 381        return 0;
 382
 383fail:
 384        for (i = 0; i < mapped_bars; i++)
 385                gasket_unmap_pci_bar(gasket_dev, i);
 386
 387        return -ENOMEM;
 388}
 389
 390/* Unmaps memory for the specified device. */
 391static void gasket_cleanup_pci(struct gasket_dev *gasket_dev)
 392{
 393        int i;
 394
 395        for (i = 0; i < PCI_STD_NUM_BARS; i++)
 396                gasket_unmap_pci_bar(gasket_dev, i);
 397}
 398
 399/* Determine the health of the Gasket device. */
 400static int gasket_get_hw_status(struct gasket_dev *gasket_dev)
 401{
 402        int status;
 403        int i;
 404        const struct gasket_driver_desc *driver_desc =
 405                gasket_dev->internal_desc->driver_desc;
 406
 407        status = gasket_check_and_invoke_callback_nolock(gasket_dev,
 408                                                         driver_desc->device_status_cb);
 409        if (status != GASKET_STATUS_ALIVE) {
 410                dev_dbg(gasket_dev->dev, "Hardware reported status %d.\n",
 411                        status);
 412                return status;
 413        }
 414
 415        status = gasket_interrupt_system_status(gasket_dev);
 416        if (status != GASKET_STATUS_ALIVE) {
 417                dev_dbg(gasket_dev->dev,
 418                        "Interrupt system reported status %d.\n", status);
 419                return status;
 420        }
 421
 422        for (i = 0; i < driver_desc->num_page_tables; ++i) {
 423                status = gasket_page_table_system_status(gasket_dev->page_table[i]);
 424                if (status != GASKET_STATUS_ALIVE) {
 425                        dev_dbg(gasket_dev->dev,
 426                                "Page table %d reported status %d.\n",
 427                                i, status);
 428                        return status;
 429                }
 430        }
 431
 432        return GASKET_STATUS_ALIVE;
 433}
 434
 435static ssize_t
 436gasket_write_mappable_regions(char *buf,
 437                              const struct gasket_driver_desc *driver_desc,
 438                              int bar_index)
 439{
 440        int i;
 441        ssize_t written;
 442        ssize_t total_written = 0;
 443        ulong min_addr, max_addr;
 444        struct gasket_bar_desc bar_desc =
 445                driver_desc->bar_descriptions[bar_index];
 446
 447        if (bar_desc.permissions == GASKET_NOMAP)
 448                return 0;
 449        for (i = 0;
 450             i < bar_desc.num_mappable_regions && total_written < PAGE_SIZE;
 451             i++) {
 452                min_addr = bar_desc.mappable_regions[i].start -
 453                           driver_desc->legacy_mmap_address_offset;
 454                max_addr = bar_desc.mappable_regions[i].start -
 455                           driver_desc->legacy_mmap_address_offset +
 456                           bar_desc.mappable_regions[i].length_bytes;
 457                written = scnprintf(buf, PAGE_SIZE - total_written,
 458                                    "0x%08lx-0x%08lx\n", min_addr, max_addr);
 459                total_written += written;
 460                buf += written;
 461        }
 462        return total_written;
 463}
 464
 465static ssize_t gasket_sysfs_data_show(struct device *device,
 466                                      struct device_attribute *attr, char *buf)
 467{
 468        int i, ret = 0;
 469        ssize_t current_written = 0;
 470        const struct gasket_driver_desc *driver_desc;
 471        struct gasket_dev *gasket_dev;
 472        struct gasket_sysfs_attribute *gasket_attr;
 473        const struct gasket_bar_desc *bar_desc;
 474        enum gasket_sysfs_attribute_type sysfs_type;
 475
 476        gasket_dev = gasket_sysfs_get_device_data(device);
 477        if (!gasket_dev) {
 478                dev_err(device, "No sysfs mapping found for device\n");
 479                return 0;
 480        }
 481
 482        gasket_attr = gasket_sysfs_get_attr(device, attr);
 483        if (!gasket_attr) {
 484                dev_err(device, "No sysfs attr found for device\n");
 485                gasket_sysfs_put_device_data(device, gasket_dev);
 486                return 0;
 487        }
 488
 489        driver_desc = gasket_dev->internal_desc->driver_desc;
 490
 491        sysfs_type =
 492                (enum gasket_sysfs_attribute_type)gasket_attr->data.attr_type;
 493        switch (sysfs_type) {
 494        case ATTR_BAR_OFFSETS:
 495                for (i = 0; i < PCI_STD_NUM_BARS; i++) {
 496                        bar_desc = &driver_desc->bar_descriptions[i];
 497                        if (bar_desc->size == 0)
 498                                continue;
 499                        current_written =
 500                                snprintf(buf, PAGE_SIZE - ret, "%d: 0x%lx\n", i,
 501                                         (ulong)bar_desc->base);
 502                        buf += current_written;
 503                        ret += current_written;
 504                }
 505                break;
 506        case ATTR_BAR_SIZES:
 507                for (i = 0; i < PCI_STD_NUM_BARS; i++) {
 508                        bar_desc = &driver_desc->bar_descriptions[i];
 509                        if (bar_desc->size == 0)
 510                                continue;
 511                        current_written =
 512                                snprintf(buf, PAGE_SIZE - ret, "%d: 0x%lx\n", i,
 513                                         (ulong)bar_desc->size);
 514                        buf += current_written;
 515                        ret += current_written;
 516                }
 517                break;
 518        case ATTR_DRIVER_VERSION:
 519                ret = snprintf(buf, PAGE_SIZE, "%s\n",
 520                               gasket_dev->internal_desc->driver_desc->driver_version);
 521                break;
 522        case ATTR_FRAMEWORK_VERSION:
 523                ret = snprintf(buf, PAGE_SIZE, "%s\n",
 524                               GASKET_FRAMEWORK_VERSION);
 525                break;
 526        case ATTR_DEVICE_TYPE:
 527                ret = snprintf(buf, PAGE_SIZE, "%s\n",
 528                               gasket_dev->internal_desc->driver_desc->name);
 529                break;
 530        case ATTR_HARDWARE_REVISION:
 531                ret = snprintf(buf, PAGE_SIZE, "%d\n",
 532                               gasket_dev->hardware_revision);
 533                break;
 534        case ATTR_PCI_ADDRESS:
 535                ret = snprintf(buf, PAGE_SIZE, "%s\n", gasket_dev->kobj_name);
 536                break;
 537        case ATTR_STATUS:
 538                ret = snprintf(buf, PAGE_SIZE, "%s\n",
 539                               gasket_num_name_lookup(gasket_dev->status,
 540                                                      gasket_status_name_table));
 541                break;
 542        case ATTR_IS_DEVICE_OWNED:
 543                ret = snprintf(buf, PAGE_SIZE, "%d\n",
 544                               gasket_dev->dev_info.ownership.is_owned);
 545                break;
 546        case ATTR_DEVICE_OWNER:
 547                ret = snprintf(buf, PAGE_SIZE, "%d\n",
 548                               gasket_dev->dev_info.ownership.owner);
 549                break;
 550        case ATTR_WRITE_OPEN_COUNT:
 551                ret = snprintf(buf, PAGE_SIZE, "%d\n",
 552                               gasket_dev->dev_info.ownership.write_open_count);
 553                break;
 554        case ATTR_RESET_COUNT:
 555                ret = snprintf(buf, PAGE_SIZE, "%d\n", gasket_dev->reset_count);
 556                break;
 557        case ATTR_USER_MEM_RANGES:
 558                for (i = 0; i < PCI_STD_NUM_BARS; ++i) {
 559                        current_written =
 560                                gasket_write_mappable_regions(buf, driver_desc,
 561                                                              i);
 562                        buf += current_written;
 563                        ret += current_written;
 564                }
 565                break;
 566        default:
 567                dev_dbg(gasket_dev->dev, "Unknown attribute: %s\n",
 568                        attr->attr.name);
 569                ret = 0;
 570                break;
 571        }
 572
 573        gasket_sysfs_put_attr(device, gasket_attr);
 574        gasket_sysfs_put_device_data(device, gasket_dev);
 575        return ret;
 576}
 577
/* These attributes apply to all Gasket driver instances. Every entry is a
 * read-only node served by gasket_sysfs_data_show(), keyed by the
 * gasket_sysfs_attribute_type enum value stored with it. The array must be
 * terminated by GASKET_END_OF_ATTR_ARRAY.
 */
static const struct gasket_sysfs_attribute gasket_sysfs_generic_attrs[] = {
	GASKET_SYSFS_RO(bar_offsets, gasket_sysfs_data_show, ATTR_BAR_OFFSETS),
	GASKET_SYSFS_RO(bar_sizes, gasket_sysfs_data_show, ATTR_BAR_SIZES),
	GASKET_SYSFS_RO(driver_version, gasket_sysfs_data_show,
			ATTR_DRIVER_VERSION),
	GASKET_SYSFS_RO(framework_version, gasket_sysfs_data_show,
			ATTR_FRAMEWORK_VERSION),
	GASKET_SYSFS_RO(device_type, gasket_sysfs_data_show, ATTR_DEVICE_TYPE),
	GASKET_SYSFS_RO(revision, gasket_sysfs_data_show,
			ATTR_HARDWARE_REVISION),
	GASKET_SYSFS_RO(pci_address, gasket_sysfs_data_show, ATTR_PCI_ADDRESS),
	GASKET_SYSFS_RO(status, gasket_sysfs_data_show, ATTR_STATUS),
	GASKET_SYSFS_RO(is_device_owned, gasket_sysfs_data_show,
			ATTR_IS_DEVICE_OWNED),
	GASKET_SYSFS_RO(device_owner, gasket_sysfs_data_show,
			ATTR_DEVICE_OWNER),
	GASKET_SYSFS_RO(write_open_count, gasket_sysfs_data_show,
			ATTR_WRITE_OPEN_COUNT),
	GASKET_SYSFS_RO(reset_count, gasket_sysfs_data_show, ATTR_RESET_COUNT),
	GASKET_SYSFS_RO(user_mem_ranges, gasket_sysfs_data_show,
			ATTR_USER_MEM_RANGES),
	GASKET_END_OF_ATTR_ARRAY
};
 602
 603/* Add a char device and related info. */
 604static int gasket_add_cdev(struct gasket_cdev_info *dev_info,
 605                           const struct file_operations *file_ops,
 606                           struct module *owner)
 607{
 608        int ret;
 609
 610        cdev_init(&dev_info->cdev, file_ops);
 611        dev_info->cdev.owner = owner;
 612        ret = cdev_add(&dev_info->cdev, dev_info->devt, 1);
 613        if (ret) {
 614                dev_err(dev_info->gasket_dev_ptr->dev,
 615                        "cannot add char device [ret=%d]\n", ret);
 616                return ret;
 617        }
 618        dev_info->cdev_added = 1;
 619
 620        return 0;
 621}
 622
 623/* Disable device operations. */
 624void gasket_disable_device(struct gasket_dev *gasket_dev)
 625{
 626        const struct gasket_driver_desc *driver_desc =
 627                gasket_dev->internal_desc->driver_desc;
 628        int i;
 629
 630        /* Only delete the device if it has been successfully added. */
 631        if (gasket_dev->dev_info.cdev_added)
 632                cdev_del(&gasket_dev->dev_info.cdev);
 633
 634        gasket_dev->status = GASKET_STATUS_DEAD;
 635
 636        gasket_interrupt_cleanup(gasket_dev);
 637
 638        for (i = 0; i < driver_desc->num_page_tables; ++i) {
 639                if (gasket_dev->page_table[i]) {
 640                        gasket_page_table_reset(gasket_dev->page_table[i]);
 641                        gasket_page_table_cleanup(gasket_dev->page_table[i]);
 642                }
 643        }
 644}
 645EXPORT_SYMBOL(gasket_disable_device);
 646
 647/*
 648 * Registered driver descriptor lookup for PCI devices.
 649 *
 650 * Precondition: Called with g_mutex held (to avoid a race on return).
 651 * Returns NULL if no matching device was found.
 652 */
 653static struct gasket_internal_desc *
 654lookup_pci_internal_desc(struct pci_dev *pci_dev)
 655{
 656        int i;
 657
 658        __must_hold(&g_mutex);
 659        for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
 660                if (g_descs[i].driver_desc &&
 661                    g_descs[i].driver_desc->pci_id_table &&
 662                    pci_match_id(g_descs[i].driver_desc->pci_id_table, pci_dev))
 663                        return &g_descs[i];
 664        }
 665
 666        return NULL;
 667}
 668
 669/*
 670 * Verifies that the user has permissions to perform the requested mapping and
 671 * that the provided descriptor/range is of adequate size to hold the range to
 672 * be mapped.
 673 */
 674static bool gasket_mmap_has_permissions(struct gasket_dev *gasket_dev,
 675                                        struct vm_area_struct *vma,
 676                                        int bar_permissions)
 677{
 678        int requested_permissions;
 679        /* Always allow sysadmin to access. */
 680        if (capable(CAP_SYS_ADMIN))
 681                return true;
 682
 683        /* Never allow non-sysadmins to access to a dead device. */
 684        if (gasket_dev->status != GASKET_STATUS_ALIVE) {
 685                dev_dbg(gasket_dev->dev, "Device is dead.\n");
 686                return false;
 687        }
 688
 689        /* Make sure that no wrong flags are set. */
 690        requested_permissions =
 691                (vma->vm_flags & VM_ACCESS_FLAGS);
 692        if (requested_permissions & ~(bar_permissions)) {
 693                dev_dbg(gasket_dev->dev,
 694                        "Attempting to map a region with requested permissions 0x%x, but region has permissions 0x%x.\n",
 695                        requested_permissions, bar_permissions);
 696                return false;
 697        }
 698
 699        /* Do not allow a non-owner to write. */
 700        if ((vma->vm_flags & VM_WRITE) &&
 701            !gasket_owned_by_current_tgid(&gasket_dev->dev_info)) {
 702                dev_dbg(gasket_dev->dev,
 703                        "Attempting to mmap a region for write without owning device.\n");
 704                return false;
 705        }
 706
 707        return true;
 708}
 709
 710/*
 711 * Verifies that the input address is within the region allocated to coherent
 712 * buffer.
 713 */
 714static bool
 715gasket_is_coherent_region(const struct gasket_driver_desc *driver_desc,
 716                          ulong address)
 717{
 718        struct gasket_coherent_buffer_desc coh_buff_desc =
 719                driver_desc->coherent_buffer_description;
 720
 721        if (coh_buff_desc.permissions != GASKET_NOMAP) {
 722                if ((address >= coh_buff_desc.base) &&
 723                    (address < coh_buff_desc.base + coh_buff_desc.size)) {
 724                        return true;
 725                }
 726        }
 727        return false;
 728}
 729
 730static int gasket_get_bar_index(const struct gasket_dev *gasket_dev,
 731                                ulong phys_addr)
 732{
 733        int i;
 734        const struct gasket_driver_desc *driver_desc;
 735
 736        driver_desc = gasket_dev->internal_desc->driver_desc;
 737        for (i = 0; i < PCI_STD_NUM_BARS; ++i) {
 738                struct gasket_bar_desc bar_desc =
 739                        driver_desc->bar_descriptions[i];
 740
 741                if (bar_desc.permissions != GASKET_NOMAP) {
 742                        if (phys_addr >= bar_desc.base &&
 743                            phys_addr < (bar_desc.base + bar_desc.size)) {
 744                                return i;
 745                        }
 746                }
 747        }
 748        /* If we haven't found the address by now, it is invalid. */
 749        return -EINVAL;
 750}
 751
/*
 * Sets the actual bounds to map, given the device's mappable region.
 *
 * Given the device's mappable region, along with the user-requested mapping
 * start offset and length of the user region, determine how much of this
 * mappable region can be mapped into the user's region (start/end offsets),
 * and the physical offset (phys_offset) into the BAR where the mapping should
 * begin (either the VMA's or region lower bound).
 *
 * In other words, this calculates the overlap between the VMA
 * (bar_offset, requested_length) and the given gasket_mappable_region.
 *
 * Returns true if there's anything to map, and false otherwise.
 *
 * NOTE(review): when bar_offset + requested_length == range_start the
 * requested span ends exactly where the region begins, yet the first test
 * below uses '<', so this case takes the second branch and yields a
 * zero-length mapping with a true return — confirm callers tolerate that.
 * Also assumes bar_offset + requested_length does not wrap a ulong; verify
 * inputs are pre-validated by the caller.
 */
static bool
gasket_mm_get_mapping_addrs(const struct gasket_mappable_region *region,
			    ulong bar_offset, ulong requested_length,
			    struct gasket_mappable_region *mappable_region,
			    ulong *virt_offset)
{
	ulong range_start = region->start;
	ulong range_length = region->length_bytes;
	ulong range_end = range_start + range_length;

	/* Default: no gap between VMA start and the mapped portion. */
	*virt_offset = 0;
	if (bar_offset + requested_length < range_start) {
		/*
		 * If the requested region is completely below the range,
		 * there is nothing to map.
		 */
		return false;
	} else if (bar_offset <= range_start) {
		/* If the bar offset is below this range's start
		 * but the requested length continues into it:
		 * 1) Only map starting from the beginning of this
		 *	range's phys. offset, so we don't map unmappable
		 *	memory.
		 * 2) The length of the virtual memory to not map is the
		 *	delta between the bar offset and the
		 *	mappable start (and since the mappable start is
		 *	bigger, start - req.)
		 * 3) The map length is the minimum of the mappable
		 *	requested length (requested_length - virt_offset)
		 *	and the actual mappable length of the range.
		 */
		mappable_region->start = range_start;
		*virt_offset = range_start - bar_offset;
		mappable_region->length_bytes =
			min(requested_length - *virt_offset, range_length);
		return true;
	} else if (bar_offset > range_start &&
		   bar_offset < range_end) {
		/*
		 * If the bar offset is within this range:
		 * 1) Map starting from the bar offset.
		 * 2) Because there is no forbidden memory between the
		 *	bar offset and the range start,
		 *	virt_offset is 0.
		 * 3) The map length is the minimum of the requested
		 *	length and the remaining length in the buffer
		 *	(range_end - bar_offset)
		 */
		mappable_region->start = bar_offset;
		*virt_offset = 0;
		mappable_region->length_bytes =
			min(requested_length, range_end - bar_offset);
		return true;
	}

	/*
	 * If the requested [start] offset is above range_end,
	 * there's nothing to map.
	 */
	return false;
}
 827
 828/*
 829 * Calculates the offset where the VMA range begins in its containing BAR.
 830 * The offset is written into bar_offset on success.
 831 * Returns zero on success, anything else on error.
 832 */
 833static int gasket_mm_vma_bar_offset(const struct gasket_dev *gasket_dev,
 834                                    const struct vm_area_struct *vma,
 835                                    ulong *bar_offset)
 836{
 837        ulong raw_offset;
 838        int bar_index;
 839        const struct gasket_driver_desc *driver_desc =
 840                gasket_dev->internal_desc->driver_desc;
 841
 842        raw_offset = (vma->vm_pgoff << PAGE_SHIFT) +
 843                driver_desc->legacy_mmap_address_offset;
 844        bar_index = gasket_get_bar_index(gasket_dev, raw_offset);
 845        if (bar_index < 0) {
 846                dev_err(gasket_dev->dev,
 847                        "Unable to find matching bar for address 0x%lx\n",
 848                        raw_offset);
 849                trace_gasket_mmap_exit(bar_index);
 850                return bar_index;
 851        }
 852        *bar_offset =
 853                raw_offset - driver_desc->bar_descriptions[bar_index].base;
 854
 855        return 0;
 856}
 857
 858int gasket_mm_unmap_region(const struct gasket_dev *gasket_dev,
 859                           struct vm_area_struct *vma,
 860                           const struct gasket_mappable_region *map_region)
 861{
 862        ulong bar_offset;
 863        ulong virt_offset;
 864        struct gasket_mappable_region mappable_region;
 865        int ret;
 866
 867        if (map_region->length_bytes == 0)
 868                return 0;
 869
 870        ret = gasket_mm_vma_bar_offset(gasket_dev, vma, &bar_offset);
 871        if (ret)
 872                return ret;
 873
 874        if (!gasket_mm_get_mapping_addrs(map_region, bar_offset,
 875                                         vma->vm_end - vma->vm_start,
 876                                         &mappable_region, &virt_offset))
 877                return 1;
 878
 879        /*
 880         * The length passed to zap_vma_ptes MUST BE A MULTIPLE OF
 881         * PAGE_SIZE! Trust me. I have the scars.
 882         *
 883         * Next multiple of y: ceil_div(x, y) * y
 884         */
 885        zap_vma_ptes(vma, vma->vm_start + virt_offset,
 886                     DIV_ROUND_UP(mappable_region.length_bytes, PAGE_SIZE) *
 887                     PAGE_SIZE);
 888        return 0;
 889}
 890EXPORT_SYMBOL(gasket_mm_unmap_region);
 891
/*
 * Maps a virtual address + range to a physical offset of a BAR.
 *
 * Clips the request to the overlap between the VMA and @mappable_region,
 * then maps that overlap with io_remap_pfn_range() in bounded chunks.
 *
 * Returns DO_MAP_REGION_SUCCESS on success, DO_MAP_REGION_INVALID when
 * there is nothing in this region to map, or DO_MAP_REGION_FAILURE when
 * remapping failed part-way; in the failure case the partial mapping is
 * torn down and mappable_region->length_bytes is overwritten with the
 * number of bytes that had been mapped.
 */
static enum do_map_region_status
do_map_region(const struct gasket_dev *gasket_dev, struct vm_area_struct *vma,
              struct gasket_mappable_region *mappable_region)
{
        /* Maximum size of a single call to io_remap_pfn_range. */
        /* I pulled this number out of thin air. */
        const ulong max_chunk_size = 64 * 1024 * 1024;
        ulong chunk_size, mapped_bytes = 0;

        const struct gasket_driver_desc *driver_desc =
                gasket_dev->internal_desc->driver_desc;

        ulong bar_offset, virt_offset;
        struct gasket_mappable_region region_to_map;
        ulong phys_offset, map_length;
        ulong virt_base, phys_base;
        int bar_index, ret;

        ret = gasket_mm_vma_bar_offset(gasket_dev, vma, &bar_offset);
        if (ret)
                return DO_MAP_REGION_INVALID;

        /* Compute the overlap of the VMA with this mappable region. */
        if (!gasket_mm_get_mapping_addrs(mappable_region, bar_offset,
                                         vma->vm_end - vma->vm_start,
                                         &region_to_map, &virt_offset))
                return DO_MAP_REGION_INVALID;
        phys_offset = region_to_map.start;
        map_length = region_to_map.length_bytes;

        virt_base = vma->vm_start + virt_offset;
        bar_index =
                gasket_get_bar_index(gasket_dev,
                                     (vma->vm_pgoff << PAGE_SHIFT) +
                                     driver_desc->legacy_mmap_address_offset);

        if (bar_index < 0)
                return DO_MAP_REGION_INVALID;

        phys_base = gasket_dev->bar_data[bar_index].phys_base + phys_offset;
        while (mapped_bytes < map_length) {
                /*
                 * io_remap_pfn_range can take a while, so we chunk its
                 * calls and call cond_resched between each.
                 */
                chunk_size = min(max_chunk_size, map_length - mapped_bytes);

                cond_resched();
                ret = io_remap_pfn_range(vma, virt_base + mapped_bytes,
                                         (phys_base + mapped_bytes) >>
                                         PAGE_SHIFT, chunk_size,
                                         vma->vm_page_prot);
                if (ret) {
                        dev_err(gasket_dev->dev,
                                "Error remapping PFN range.\n");
                        goto fail;
                }
                mapped_bytes += chunk_size;
        }

        return DO_MAP_REGION_SUCCESS;

fail:
        /* Unmap the partial chunk we mapped. */
        mappable_region->length_bytes = mapped_bytes;
        if (gasket_mm_unmap_region(gasket_dev, vma, mappable_region))
                dev_err(gasket_dev->dev,
                        "Error unmapping partial region 0x%lx (0x%lx bytes)\n",
                        (ulong)virt_offset,
                        (ulong)mapped_bytes);

        return DO_MAP_REGION_FAILURE;
}
 965
 966/* Map a region of coherent memory. */
 967static int gasket_mmap_coherent(struct gasket_dev *gasket_dev,
 968                                struct vm_area_struct *vma)
 969{
 970        const struct gasket_driver_desc *driver_desc =
 971                gasket_dev->internal_desc->driver_desc;
 972        const ulong requested_length = vma->vm_end - vma->vm_start;
 973        int ret;
 974        ulong permissions;
 975
 976        if (requested_length == 0 || requested_length >
 977            gasket_dev->coherent_buffer.length_bytes) {
 978                trace_gasket_mmap_exit(-EINVAL);
 979                return -EINVAL;
 980        }
 981
 982        permissions = driver_desc->coherent_buffer_description.permissions;
 983        if (!gasket_mmap_has_permissions(gasket_dev, vma, permissions)) {
 984                dev_err(gasket_dev->dev, "Permission checking failed.\n");
 985                trace_gasket_mmap_exit(-EPERM);
 986                return -EPERM;
 987        }
 988
 989        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 990
 991        ret = remap_pfn_range(vma, vma->vm_start,
 992                              (gasket_dev->coherent_buffer.phys_base) >>
 993                              PAGE_SHIFT, requested_length, vma->vm_page_prot);
 994        if (ret) {
 995                dev_err(gasket_dev->dev, "Error remapping PFN range err=%d.\n",
 996                        ret);
 997                trace_gasket_mmap_exit(ret);
 998                return ret;
 999        }
1000
1001        /* Record the user virtual to dma_address mapping that was
1002         * created by the kernel.
1003         */
1004        gasket_set_user_virt(gasket_dev, requested_length,
1005                             gasket_dev->coherent_buffer.phys_base,
1006                             vma->vm_start);
1007        return 0;
1008}
1009
1010/* Map a device's BARs into user space. */
1011static int gasket_mmap(struct file *filp, struct vm_area_struct *vma)
1012{
1013        int i, ret;
1014        int bar_index;
1015        int has_mapped_anything = 0;
1016        ulong permissions;
1017        ulong raw_offset, vma_size;
1018        bool is_coherent_region;
1019        const struct gasket_driver_desc *driver_desc;
1020        struct gasket_dev *gasket_dev = (struct gasket_dev *)filp->private_data;
1021        const struct gasket_bar_desc *bar_desc;
1022        struct gasket_mappable_region *map_regions = NULL;
1023        int num_map_regions = 0;
1024        enum do_map_region_status map_status;
1025
1026        driver_desc = gasket_dev->internal_desc->driver_desc;
1027
1028        if (vma->vm_start & ~PAGE_MASK) {
1029                dev_err(gasket_dev->dev,
1030                        "Base address not page-aligned: 0x%lx\n",
1031                        vma->vm_start);
1032                trace_gasket_mmap_exit(-EINVAL);
1033                return -EINVAL;
1034        }
1035
1036        /* Calculate the offset of this range into physical mem. */
1037        raw_offset = (vma->vm_pgoff << PAGE_SHIFT) +
1038                driver_desc->legacy_mmap_address_offset;
1039        vma_size = vma->vm_end - vma->vm_start;
1040        trace_gasket_mmap_entry(gasket_dev->dev_info.name, raw_offset,
1041                                vma_size);
1042
1043        /*
1044         * Check if the raw offset is within a bar region. If not, check if it
1045         * is a coherent region.
1046         */
1047        bar_index = gasket_get_bar_index(gasket_dev, raw_offset);
1048        is_coherent_region = gasket_is_coherent_region(driver_desc, raw_offset);
1049        if (bar_index < 0 && !is_coherent_region) {
1050                dev_err(gasket_dev->dev,
1051                        "Unable to find matching bar for address 0x%lx\n",
1052                        raw_offset);
1053                trace_gasket_mmap_exit(bar_index);
1054                return bar_index;
1055        }
1056        if (bar_index > 0 && is_coherent_region) {
1057                dev_err(gasket_dev->dev,
1058                        "double matching bar and coherent buffers for address 0x%lx\n",
1059                        raw_offset);
1060                trace_gasket_mmap_exit(bar_index);
1061                return -EINVAL;
1062        }
1063
1064        vma->vm_private_data = gasket_dev;
1065
1066        if (is_coherent_region)
1067                return gasket_mmap_coherent(gasket_dev, vma);
1068
1069        /* Everything in the rest of this function is for normal BAR mapping. */
1070
1071        /*
1072         * Subtract the base of the bar from the raw offset to get the
1073         * memory location within the bar to map.
1074         */
1075        bar_desc = &driver_desc->bar_descriptions[bar_index];
1076        permissions = bar_desc->permissions;
1077        if (!gasket_mmap_has_permissions(gasket_dev, vma, permissions)) {
1078                dev_err(gasket_dev->dev, "Permission checking failed.\n");
1079                trace_gasket_mmap_exit(-EPERM);
1080                return -EPERM;
1081        }
1082
1083        if (driver_desc->get_mappable_regions_cb) {
1084                ret = driver_desc->get_mappable_regions_cb(gasket_dev,
1085                                                           bar_index,
1086                                                           &map_regions,
1087                                                           &num_map_regions);
1088                if (ret)
1089                        return ret;
1090        } else {
1091                if (!gasket_mmap_has_permissions(gasket_dev, vma,
1092                                                 bar_desc->permissions)) {
1093                        dev_err(gasket_dev->dev,
1094                                "Permission checking failed.\n");
1095                        trace_gasket_mmap_exit(-EPERM);
1096                        return -EPERM;
1097                }
1098                num_map_regions = bar_desc->num_mappable_regions;
1099                map_regions = kcalloc(num_map_regions,
1100                                      sizeof(*bar_desc->mappable_regions),
1101                                      GFP_KERNEL);
1102                if (map_regions) {
1103                        memcpy(map_regions, bar_desc->mappable_regions,
1104                               num_map_regions *
1105                                        sizeof(*bar_desc->mappable_regions));
1106                }
1107        }
1108
1109        if (!map_regions || num_map_regions == 0) {
1110                dev_err(gasket_dev->dev, "No mappable regions returned!\n");
1111                return -EINVAL;
1112        }
1113
1114        /* Marks the VMA's pages as uncacheable. */
1115        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1116        for (i = 0; i < num_map_regions; i++) {
1117                map_status = do_map_region(gasket_dev, vma, &map_regions[i]);
1118                /* Try the next region if this one was not mappable. */
1119                if (map_status == DO_MAP_REGION_INVALID)
1120                        continue;
1121                if (map_status == DO_MAP_REGION_FAILURE) {
1122                        ret = -ENOMEM;
1123                        goto fail;
1124                }
1125
1126                has_mapped_anything = 1;
1127        }
1128
1129        kfree(map_regions);
1130
1131        /* If we could not map any memory, the request was invalid. */
1132        if (!has_mapped_anything) {
1133                dev_err(gasket_dev->dev,
1134                        "Map request did not contain a valid region.\n");
1135                trace_gasket_mmap_exit(-EINVAL);
1136                return -EINVAL;
1137        }
1138
1139        trace_gasket_mmap_exit(0);
1140        return 0;
1141
1142fail:
1143        /* Need to unmap any mapped ranges. */
1144        num_map_regions = i;
1145        for (i = 0; i < num_map_regions; i++)
1146                if (gasket_mm_unmap_region(gasket_dev, vma,
1147                                           &bar_desc->mappable_regions[i]))
1148                        dev_err(gasket_dev->dev, "Error unmapping range %d.\n",
1149                                i);
1150        kfree(map_regions);
1151
1152        return ret;
1153}
1154
1155/*
1156 * Open the char device file.
1157 *
1158 * If the open is for writing, and the device is not owned, this process becomes
1159 * the owner.  If the open is for writing and the device is already owned by
1160 * some other process, it is an error.  If this process is the owner, increment
1161 * the open count.
1162 *
1163 * Returns 0 if successful, a negative error number otherwise.
1164 */
1165static int gasket_open(struct inode *inode, struct file *filp)
1166{
1167        int ret;
1168        struct gasket_dev *gasket_dev;
1169        const struct gasket_driver_desc *driver_desc;
1170        struct gasket_ownership *ownership;
1171        char task_name[TASK_COMM_LEN];
1172        struct gasket_cdev_info *dev_info =
1173            container_of(inode->i_cdev, struct gasket_cdev_info, cdev);
1174        struct pid_namespace *pid_ns = task_active_pid_ns(current);
1175        bool is_root = ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN);
1176
1177        gasket_dev = dev_info->gasket_dev_ptr;
1178        driver_desc = gasket_dev->internal_desc->driver_desc;
1179        ownership = &dev_info->ownership;
1180        get_task_comm(task_name, current);
1181        filp->private_data = gasket_dev;
1182        inode->i_size = 0;
1183
1184        dev_dbg(gasket_dev->dev,
1185                "Attempting to open with tgid %u (%s) (f_mode: 0%03o, fmode_write: %d is_root: %u)\n",
1186                current->tgid, task_name, filp->f_mode,
1187                (filp->f_mode & FMODE_WRITE), is_root);
1188
1189        /* Always allow non-writing accesses. */
1190        if (!(filp->f_mode & FMODE_WRITE)) {
1191                dev_dbg(gasket_dev->dev, "Allowing read-only opening.\n");
1192                return 0;
1193        }
1194
1195        mutex_lock(&gasket_dev->mutex);
1196
1197        dev_dbg(gasket_dev->dev,
1198                "Current owner open count (owning tgid %u): %d.\n",
1199                ownership->owner, ownership->write_open_count);
1200
1201        /* Opening a node owned by another TGID is an error (unless root) */
1202        if (ownership->is_owned && ownership->owner != current->tgid &&
1203            !is_root) {
1204                dev_err(gasket_dev->dev,
1205                        "Process %u is opening a node held by %u.\n",
1206                        current->tgid, ownership->owner);
1207                mutex_unlock(&gasket_dev->mutex);
1208                return -EPERM;
1209        }
1210
1211        /* If the node is not owned, assign it to the current TGID. */
1212        if (!ownership->is_owned) {
1213                ret = gasket_check_and_invoke_callback_nolock(gasket_dev,
1214                                                              driver_desc->device_open_cb);
1215                if (ret) {
1216                        dev_err(gasket_dev->dev,
1217                                "Error in device open cb: %d\n", ret);
1218                        mutex_unlock(&gasket_dev->mutex);
1219                        return ret;
1220                }
1221                ownership->is_owned = 1;
1222                ownership->owner = current->tgid;
1223                dev_dbg(gasket_dev->dev, "Device owner is now tgid %u\n",
1224                        ownership->owner);
1225        }
1226
1227        ownership->write_open_count++;
1228
1229        dev_dbg(gasket_dev->dev, "New open count (owning tgid %u): %d\n",
1230                ownership->owner, ownership->write_open_count);
1231
1232        mutex_unlock(&gasket_dev->mutex);
1233        return 0;
1234}
1235
1236/*
1237 * Called on a close of the device file.  If this process is the owner,
1238 * decrement the open count.  On last close by the owner, free up buffers and
1239 * eventfd contexts, and release ownership.
1240 *
1241 * Returns 0 if successful, a negative error number otherwise.
1242 */
1243static int gasket_release(struct inode *inode, struct file *file)
1244{
1245        int i;
1246        struct gasket_dev *gasket_dev;
1247        struct gasket_ownership *ownership;
1248        const struct gasket_driver_desc *driver_desc;
1249        char task_name[TASK_COMM_LEN];
1250        struct gasket_cdev_info *dev_info =
1251                container_of(inode->i_cdev, struct gasket_cdev_info, cdev);
1252        struct pid_namespace *pid_ns = task_active_pid_ns(current);
1253        bool is_root = ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN);
1254
1255        gasket_dev = dev_info->gasket_dev_ptr;
1256        driver_desc = gasket_dev->internal_desc->driver_desc;
1257        ownership = &dev_info->ownership;
1258        get_task_comm(task_name, current);
1259        mutex_lock(&gasket_dev->mutex);
1260
1261        dev_dbg(gasket_dev->dev,
1262                "Releasing device node. Call origin: tgid %u (%s) (f_mode: 0%03o, fmode_write: %d, is_root: %u)\n",
1263                current->tgid, task_name, file->f_mode,
1264                (file->f_mode & FMODE_WRITE), is_root);
1265        dev_dbg(gasket_dev->dev, "Current open count (owning tgid %u): %d\n",
1266                ownership->owner, ownership->write_open_count);
1267
1268        if (file->f_mode & FMODE_WRITE) {
1269                ownership->write_open_count--;
1270                if (ownership->write_open_count == 0) {
1271                        dev_dbg(gasket_dev->dev, "Device is now free\n");
1272                        ownership->is_owned = 0;
1273                        ownership->owner = 0;
1274
1275                        /* Forces chip reset before we unmap the page tables. */
1276                        driver_desc->device_reset_cb(gasket_dev);
1277
1278                        for (i = 0; i < driver_desc->num_page_tables; ++i) {
1279                                gasket_page_table_unmap_all(gasket_dev->page_table[i]);
1280                                gasket_page_table_garbage_collect(gasket_dev->page_table[i]);
1281                                gasket_free_coherent_memory_all(gasket_dev, i);
1282                        }
1283
1284                        /* Closes device, enters power save. */
1285                        gasket_check_and_invoke_callback_nolock(gasket_dev,
1286                                                                driver_desc->device_close_cb);
1287                }
1288        }
1289
1290        dev_dbg(gasket_dev->dev, "New open count (owning tgid %u): %d\n",
1291                ownership->owner, ownership->write_open_count);
1292        mutex_unlock(&gasket_dev->mutex);
1293        return 0;
1294}
1295
1296/*
1297 * Gasket ioctl dispatch function.
1298 *
1299 * Check if the ioctl is a generic ioctl. If not, pass the ioctl to the
1300 * ioctl_handler_cb registered in the driver description.
1301 * If the ioctl is a generic ioctl, pass it to gasket_ioctl_handler.
1302 */
1303static long gasket_ioctl(struct file *filp, uint cmd, ulong arg)
1304{
1305        struct gasket_dev *gasket_dev;
1306        const struct gasket_driver_desc *driver_desc;
1307        void __user *argp = (void __user *)arg;
1308        char path[256];
1309
1310        gasket_dev = (struct gasket_dev *)filp->private_data;
1311        driver_desc = gasket_dev->internal_desc->driver_desc;
1312        if (!driver_desc) {
1313                dev_dbg(gasket_dev->dev,
1314                        "Unable to find device descriptor for file %s\n",
1315                        d_path(&filp->f_path, path, 256));
1316                return -ENODEV;
1317        }
1318
1319        if (!gasket_is_supported_ioctl(cmd)) {
1320                /*
1321                 * The ioctl handler is not a standard Gasket callback, since
1322                 * it requires different arguments. This means we can't use
1323                 * check_and_invoke_callback.
1324                 */
1325                if (driver_desc->ioctl_handler_cb)
1326                        return driver_desc->ioctl_handler_cb(filp, cmd, argp);
1327
1328                dev_dbg(gasket_dev->dev, "Received unknown ioctl 0x%x\n", cmd);
1329                return -EINVAL;
1330        }
1331
1332        return gasket_handle_ioctl(filp, cmd, argp);
1333}
1334
/* File operations for all Gasket devices. */
static const struct file_operations gasket_file_ops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,		/* char device: seeking is meaningless */
	.mmap = gasket_mmap,		/* map BARs / coherent buffer to user space */
	.open = gasket_open,		/* ownership tracking for write opens */
	.release = gasket_release,	/* last-close cleanup, release ownership */
	.unlocked_ioctl = gasket_ioctl,	/* generic + driver-specific ioctls */
};
1344
/*
 * Perform final init and marks the device as active.
 *
 * Sets up interrupts, initializes and resets each configured page table,
 * records the hardware revision and device status, then registers the
 * character device so user space can open the node.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int gasket_enable_device(struct gasket_dev *gasket_dev)
{
	int tbl_idx;
	int ret;
	const struct gasket_driver_desc *driver_desc =
		gasket_dev->internal_desc->driver_desc;

	ret = gasket_interrupt_init(gasket_dev);
	if (ret) {
		dev_err(gasket_dev->dev,
			"Critical failure to allocate interrupts: %d\n", ret);
		gasket_interrupt_cleanup(gasket_dev);
		return ret;
	}

	for (tbl_idx = 0; tbl_idx < driver_desc->num_page_tables; tbl_idx++) {
		dev_dbg(gasket_dev->dev, "Initializing page table %d.\n",
			tbl_idx);
		ret = gasket_page_table_init(&gasket_dev->page_table[tbl_idx],
					     &gasket_dev->bar_data[driver_desc->page_table_bar_index],
					     &driver_desc->page_table_configs[tbl_idx],
					     gasket_dev->dev,
					     gasket_dev->pci_dev);
		if (ret) {
			dev_err(gasket_dev->dev,
				"Couldn't init page table %d: %d\n",
				tbl_idx, ret);
			/*
			 * NOTE(review): interrupts (and page tables from
			 * earlier iterations) are not torn down on this path
			 * — presumably the caller's disable/remove path
			 * cleans them up; verify.
			 */
			return ret;
		}
		/*
		 * Make sure that the page table is clear and set to simple
		 * addresses.
		 */
		gasket_page_table_reset(gasket_dev->page_table[tbl_idx]);
	}

	/*
	 * hardware_revision_cb returns a positive integer (the rev) if
	 * successful.)
	 */
	ret = check_and_invoke_callback(gasket_dev,
					driver_desc->hardware_revision_cb);
	if (ret < 0) {
		dev_err(gasket_dev->dev,
			"Error getting hardware revision: %d\n", ret);
		return ret;
	}
	gasket_dev->hardware_revision = ret;

	/* device_status_cb returns a device status, not an error code. */
	gasket_dev->status = gasket_get_hw_status(gasket_dev);
	if (gasket_dev->status == GASKET_STATUS_DEAD)
		dev_err(gasket_dev->dev, "Device reported as unhealthy.\n");

	/* Expose the char device; user space can open the node after this. */
	ret = gasket_add_cdev(&gasket_dev->dev_info, &gasket_file_ops,
			      driver_desc->module);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(gasket_enable_device);
1408
1409static int __gasket_add_device(struct device *parent_dev,
1410                               struct gasket_internal_desc *internal_desc,
1411                               struct gasket_dev **gasket_devp)
1412{
1413        int ret;
1414        struct gasket_dev *gasket_dev;
1415        const struct gasket_driver_desc *driver_desc =
1416            internal_desc->driver_desc;
1417
1418        ret = gasket_alloc_dev(internal_desc, parent_dev, &gasket_dev);
1419        if (ret)
1420                return ret;
1421        if (IS_ERR(gasket_dev->dev_info.device)) {
1422                dev_err(parent_dev, "Cannot create %s device %s [ret = %ld]\n",
1423                        driver_desc->name, gasket_dev->dev_info.name,
1424                        PTR_ERR(gasket_dev->dev_info.device));
1425                ret = -ENODEV;
1426                goto free_gasket_dev;
1427        }
1428
1429        ret = gasket_sysfs_create_mapping(gasket_dev->dev_info.device,
1430                                          gasket_dev);
1431        if (ret)
1432                goto remove_device;
1433
1434        ret = gasket_sysfs_create_entries(gasket_dev->dev_info.device,
1435                                          gasket_sysfs_generic_attrs);
1436        if (ret)
1437                goto remove_sysfs_mapping;
1438
1439        *gasket_devp = gasket_dev;
1440        return 0;
1441
1442remove_sysfs_mapping:
1443        gasket_sysfs_remove_mapping(gasket_dev->dev_info.device);
1444remove_device:
1445        device_destroy(internal_desc->class, gasket_dev->dev_info.devt);
1446free_gasket_dev:
1447        gasket_free_dev(gasket_dev);
1448        return ret;
1449}
1450
1451static void __gasket_remove_device(struct gasket_internal_desc *internal_desc,
1452                                   struct gasket_dev *gasket_dev)
1453{
1454        gasket_sysfs_remove_mapping(gasket_dev->dev_info.device);
1455        device_destroy(internal_desc->class, gasket_dev->dev_info.devt);
1456        gasket_free_dev(gasket_dev);
1457}
1458
1459/*
1460 * Add PCI gasket device.
1461 *
1462 * Called by Gasket device probe function.
1463 * Allocates device metadata and maps device memory.  The device driver must
1464 * call gasket_enable_device after driver init is complete to place the device
1465 * in active use.
1466 */
1467int gasket_pci_add_device(struct pci_dev *pci_dev,
1468                          struct gasket_dev **gasket_devp)
1469{
1470        int ret;
1471        struct gasket_internal_desc *internal_desc;
1472        struct gasket_dev *gasket_dev;
1473        struct device *parent;
1474
1475        dev_dbg(&pci_dev->dev, "add PCI gasket device\n");
1476
1477        mutex_lock(&g_mutex);
1478        internal_desc = lookup_pci_internal_desc(pci_dev);
1479        mutex_unlock(&g_mutex);
1480        if (!internal_desc) {
1481                dev_err(&pci_dev->dev,
1482                        "PCI add device called for unknown driver type\n");
1483                return -ENODEV;
1484        }
1485
1486        parent = &pci_dev->dev;
1487        ret = __gasket_add_device(parent, internal_desc, &gasket_dev);
1488        if (ret)
1489                return ret;
1490
1491        gasket_dev->pci_dev = pci_dev;
1492        ret = gasket_setup_pci(pci_dev, gasket_dev);
1493        if (ret)
1494                goto cleanup_pci;
1495
1496        /*
1497         * Once we've created the mapping structures successfully, attempt to
1498         * create a symlink to the pci directory of this object.
1499         */
1500        ret = sysfs_create_link(&gasket_dev->dev_info.device->kobj,
1501                                &pci_dev->dev.kobj, dev_name(&pci_dev->dev));
1502        if (ret) {
1503                dev_err(gasket_dev->dev,
1504                        "Cannot create sysfs pci link: %d\n", ret);
1505                goto cleanup_pci;
1506        }
1507
1508        *gasket_devp = gasket_dev;
1509        return 0;
1510
1511cleanup_pci:
1512        gasket_cleanup_pci(gasket_dev);
1513        __gasket_remove_device(internal_desc, gasket_dev);
1514        return ret;
1515}
1516EXPORT_SYMBOL(gasket_pci_add_device);
1517
/*
 * Remove a PCI gasket device.
 *
 * Counterpart to gasket_pci_add_device(): finds the gasket framework state
 * associated with @pci_dev and tears it down. Safe to call for a PCI device
 * that was never added (or was already removed) - it simply returns.
 */
void gasket_pci_remove_device(struct pci_dev *pci_dev)
{
	int i;
	struct gasket_internal_desc *internal_desc;
	struct gasket_dev *gasket_dev = NULL;
	/* Find the device desc. */
	mutex_lock(&g_mutex);
	internal_desc = lookup_pci_internal_desc(pci_dev);
	if (!internal_desc) {
		/* No registered gasket driver claims this PCI device. */
		mutex_unlock(&g_mutex);
		return;
	}
	mutex_unlock(&g_mutex);

	/* Now find the specific device */
	mutex_lock(&internal_desc->mutex);
	for (i = 0; i < GASKET_DEV_MAX; i++) {
		if (internal_desc->devs[i] &&
		    internal_desc->devs[i]->pci_dev == pci_dev) {
			gasket_dev = internal_desc->devs[i];
			break;
		}
	}
	mutex_unlock(&internal_desc->mutex);

	/* Device was never added, or has already been removed. */
	if (!gasket_dev)
		return;

	dev_dbg(gasket_dev->dev, "remove %s PCI gasket device\n",
		internal_desc->driver_desc->name);

	/* Release PCI state, then drop the device from the framework. */
	gasket_cleanup_pci(gasket_dev);
	__gasket_remove_device(internal_desc, gasket_dev);
}
EXPORT_SYMBOL(gasket_pci_remove_device);
1554
1555/**
1556 * Lookup a name by number in a num_name table.
1557 * @num: Number to lookup.
1558 * @table: Array of num_name structures, the table for the lookup.
1559 *
1560 * Description: Searches for num in the table.  If found, the
1561 *              corresponding name is returned; otherwise NULL
1562 *              is returned.
1563 *
1564 *              The table must have a NULL name pointer at the end.
1565 */
1566const char *gasket_num_name_lookup(uint num,
1567                                   const struct gasket_num_name *table)
1568{
1569        uint i = 0;
1570
1571        while (table[i].snn_name) {
1572                if (num == table[i].snn_num)
1573                        break;
1574                ++i;
1575        }
1576
1577        return table[i].snn_name;
1578}
1579EXPORT_SYMBOL(gasket_num_name_lookup);
1580
1581int gasket_reset(struct gasket_dev *gasket_dev)
1582{
1583        int ret;
1584
1585        mutex_lock(&gasket_dev->mutex);
1586        ret = gasket_reset_nolock(gasket_dev);
1587        mutex_unlock(&gasket_dev->mutex);
1588        return ret;
1589}
1590EXPORT_SYMBOL(gasket_reset);
1591
1592int gasket_reset_nolock(struct gasket_dev *gasket_dev)
1593{
1594        int ret;
1595        int i;
1596        const struct gasket_driver_desc *driver_desc;
1597
1598        driver_desc = gasket_dev->internal_desc->driver_desc;
1599        if (!driver_desc->device_reset_cb)
1600                return 0;
1601
1602        ret = driver_desc->device_reset_cb(gasket_dev);
1603        if (ret) {
1604                dev_dbg(gasket_dev->dev, "Device reset cb returned %d.\n",
1605                        ret);
1606                return ret;
1607        }
1608
1609        /* Reinitialize the page tables and interrupt framework. */
1610        for (i = 0; i < driver_desc->num_page_tables; ++i)
1611                gasket_page_table_reset(gasket_dev->page_table[i]);
1612
1613        ret = gasket_interrupt_reinit(gasket_dev);
1614        if (ret) {
1615                dev_dbg(gasket_dev->dev, "Unable to reinit interrupts: %d.\n",
1616                        ret);
1617                return ret;
1618        }
1619
1620        /* Get current device health. */
1621        gasket_dev->status = gasket_get_hw_status(gasket_dev);
1622        if (gasket_dev->status == GASKET_STATUS_DEAD) {
1623                dev_dbg(gasket_dev->dev, "Device reported as dead.\n");
1624                return -EINVAL;
1625        }
1626
1627        return 0;
1628}
1629EXPORT_SYMBOL(gasket_reset_nolock);
1630
1631gasket_ioctl_permissions_cb_t
1632gasket_get_ioctl_permissions_cb(struct gasket_dev *gasket_dev)
1633{
1634        return gasket_dev->internal_desc->driver_desc->ioctl_permissions_cb;
1635}
1636EXPORT_SYMBOL(gasket_get_ioctl_permissions_cb);
1637
1638/* Get the driver structure for a given gasket_dev.
1639 * @dev: pointer to gasket_dev, implementing the requested driver.
1640 */
1641const struct gasket_driver_desc *gasket_get_driver_desc(struct gasket_dev *dev)
1642{
1643        return dev->internal_desc->driver_desc;
1644}
1645
1646/* Get the device structure for a given gasket_dev.
1647 * @dev: pointer to gasket_dev, implementing the requested driver.
1648 */
1649struct device *gasket_get_device(struct gasket_dev *dev)
1650{
1651        return dev->dev;
1652}
1653
1654/**
1655 * Asynchronously waits on device.
1656 * @gasket_dev: Device struct.
1657 * @bar: Bar
1658 * @offset: Register offset
1659 * @mask: Register mask
1660 * @val: Expected value
1661 * @max_retries: number of sleep periods
1662 * @delay_ms: Timeout in milliseconds
1663 *
1664 * Description: Busy waits for a specific combination of bits to be set on a
1665 * Gasket register.
1666 **/
1667int gasket_wait_with_reschedule(struct gasket_dev *gasket_dev, int bar,
1668                                u64 offset, u64 mask, u64 val,
1669                                uint max_retries, u64 delay_ms)
1670{
1671        uint retries = 0;
1672        u64 tmp;
1673
1674        while (retries < max_retries) {
1675                tmp = gasket_dev_read_64(gasket_dev, bar, offset);
1676                if ((tmp & mask) == val)
1677                        return 0;
1678                msleep(delay_ms);
1679                retries++;
1680        }
1681        dev_dbg(gasket_dev->dev, "%s timeout: reg %llx timeout (%llu ms)\n",
1682                __func__, offset, max_retries * delay_ms);
1683        return -ETIMEDOUT;
1684}
1685EXPORT_SYMBOL(gasket_wait_with_reschedule);
1686
/* See gasket_core.h for description. */
int gasket_register_device(const struct gasket_driver_desc *driver_desc)
{
	int i, ret;
	int desc_idx = -1;
	struct gasket_internal_desc *internal;

	pr_debug("Loading %s driver version %s\n", driver_desc->name,
		 driver_desc->driver_version);
	/* Check for duplicates and find a free slot. */
	mutex_lock(&g_mutex);

	for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
		if (g_descs[i].driver_desc == driver_desc) {
			pr_err("%s driver already loaded/registered\n",
			       driver_desc->name);
			mutex_unlock(&g_mutex);
			return -EBUSY;
		}
	}

	/* This and the above loop could be combined, but this reads easier. */
	for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
		if (!g_descs[i].driver_desc) {
			g_descs[i].driver_desc = driver_desc;
			desc_idx = i;
			break;
		}
	}
	mutex_unlock(&g_mutex);

	/* desc_idx is still -1 only if every slot was already occupied. */
	if (desc_idx == -1) {
		pr_err("too many drivers loaded, max %d\n",
		       GASKET_FRAMEWORK_DESC_MAX);
		return -EBUSY;
	}

	/*
	 * The slot was claimed above while holding g_mutex, so the rest of
	 * the setup can safely proceed without the lock.
	 */
	internal = &g_descs[desc_idx];
	mutex_init(&internal->mutex);
	memset(internal->devs, 0, sizeof(struct gasket_dev *) * GASKET_DEV_MAX);
	internal->class =
		class_create(driver_desc->module, driver_desc->name);

	if (IS_ERR(internal->class)) {
		pr_err("Cannot register %s class [ret=%ld]\n",
		       driver_desc->name, PTR_ERR(internal->class));
		ret = PTR_ERR(internal->class);
		goto unregister_gasket_driver;
	}

	/* Reserve a char device number range for this driver's devices. */
	ret = register_chrdev_region(MKDEV(driver_desc->major,
					   driver_desc->minor), GASKET_DEV_MAX,
				     driver_desc->name);
	if (ret) {
		pr_err("cannot register %s char driver [ret=%d]\n",
		       driver_desc->name, ret);
		goto destroy_class;
	}

	return 0;

	/* Error unwind: each label falls through to the ones below it. */
destroy_class:
	class_destroy(internal->class);

unregister_gasket_driver:
	/* Release the descriptor slot claimed above. */
	mutex_lock(&g_mutex);
	g_descs[desc_idx].driver_desc = NULL;
	mutex_unlock(&g_mutex);
	return ret;
}
EXPORT_SYMBOL(gasket_register_device);
1758
1759/* See gasket_core.h for description. */
1760void gasket_unregister_device(const struct gasket_driver_desc *driver_desc)
1761{
1762        int i, desc_idx;
1763        struct gasket_internal_desc *internal_desc = NULL;
1764
1765        mutex_lock(&g_mutex);
1766        for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
1767                if (g_descs[i].driver_desc == driver_desc) {
1768                        internal_desc = &g_descs[i];
1769                        desc_idx = i;
1770                        break;
1771                }
1772        }
1773
1774        if (!internal_desc) {
1775                mutex_unlock(&g_mutex);
1776                pr_err("request to unregister unknown desc: %s, %d:%d\n",
1777                       driver_desc->name, driver_desc->major,
1778                       driver_desc->minor);
1779                return;
1780        }
1781
1782        unregister_chrdev_region(MKDEV(driver_desc->major, driver_desc->minor),
1783                                 GASKET_DEV_MAX);
1784
1785        class_destroy(internal_desc->class);
1786
1787        /* Finally, effectively "remove" the driver. */
1788        g_descs[desc_idx].driver_desc = NULL;
1789        mutex_unlock(&g_mutex);
1790
1791        pr_debug("removed %s driver\n", driver_desc->name);
1792}
1793EXPORT_SYMBOL(gasket_unregister_device);
1794
1795static int __init gasket_init(void)
1796{
1797        int i;
1798
1799        mutex_lock(&g_mutex);
1800        for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
1801                g_descs[i].driver_desc = NULL;
1802                mutex_init(&g_descs[i].mutex);
1803        }
1804
1805        gasket_sysfs_init();
1806
1807        mutex_unlock(&g_mutex);
1808        return 0;
1809}
1810
/* Module metadata and entry-point registration. */
MODULE_DESCRIPTION("Google Gasket driver framework");
MODULE_VERSION(GASKET_FRAMEWORK_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Rob Springer <rspringer@google.com>");
module_init(gasket_init);
1816