linux/arch/powerpc/platforms/powernv/vas.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016-17 IBM Corp.
 */

#define pr_fmt(fmt) "vas: " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <asm/prom.h>
#include <asm/xive.h>

#include "vas.h"

DEFINE_MUTEX(vas_mutex);
static LIST_HEAD(vas_instances);

static DEFINE_PER_CPU(int, cpu_vas_id);

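/*
 * Register the threaded fault IRQ handler for this VAS instance and
 * set up its fault window. If the fault window setup fails, the IRQ
 * is released again, so the caller only has to clear vinst->virq.
 */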
static int vas_irq_fault_window_setup(struct vas_instance *vinst)
{
        int rc = 0;

        rc = request_threaded_irq(vinst->virq, vas_fault_handler,
                                vas_fault_thread_fn, 0, vinst->name, vinst);

        if (rc) {
                pr_err("VAS[%d]: Request IRQ(%d) failed with %d\n",
                                vinst->vas_id, vinst->virq, rc);
                goto out;
        }

        rc = vas_setup_fault_window(vinst);
        if (rc)
                free_irq(vinst->virq, vinst);

out:
        return rc;
}

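/*
 * Initialize one VAS instance from its device tree node: read the VAS
 * and chip ids, record the BAR start addresses from the four platform
 * resources, allocate and map the fault interrupt, and add the
 * instance to the global vas_instances list.
 */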
static int init_vas_instance(struct platform_device *pdev)
{
        struct device_node *dn = pdev->dev.of_node;
        struct vas_instance *vinst;
        struct xive_irq_data *xd;
        uint32_t chipid, hwirq;
        struct resource *res;
        int rc, cpu, vasid;

        rc = of_property_read_u32(dn, "ibm,vas-id", &vasid);
        if (rc) {
                pr_err("No ibm,vas-id property for %s?\n", pdev->name);
                return -ENODEV;
        }

        rc = of_property_read_u32(dn, "ibm,chip-id", &chipid);
        if (rc) {
                pr_err("No ibm,chip-id property for %s?\n", pdev->name);
                return -ENODEV;
        }

        if (pdev->num_resources != 4) {
                pr_err("Unexpected DT configuration for [%s, %d]\n",
                                pdev->name, vasid);
                return -ENODEV;
        }

        vinst = kzalloc(sizeof(*vinst), GFP_KERNEL);
        if (!vinst)
                return -ENOMEM;

        vinst->name = kasprintf(GFP_KERNEL, "vas-%d", vasid);
        if (!vinst->name) {
                kfree(vinst);
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&vinst->node);
        ida_init(&vinst->ida);
        mutex_init(&vinst->mutex);
        vinst->vas_id = vasid;
        vinst->pdev = pdev;

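        /*
         * The four platform resources carry, in order, the hvwc BAR,
         * the uwc BAR, the paste base address and the paste window id
         * shift (taken from the resource's end field).
         */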
        res = &pdev->resource[0];
        vinst->hvwc_bar_start = res->start;

        res = &pdev->resource[1];
        vinst->uwc_bar_start = res->start;

        res = &pdev->resource[2];
        vinst->paste_base_addr = res->start;

        res = &pdev->resource[3];
        if (res->end > 62) {
                pr_err("Bad 'paste_win_id_shift' in DT, %llx\n", res->end);
                rc = -ENODEV;
                goto free_vinst;
        }

        vinst->paste_win_id_shift = 63 - res->end;

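        /*
         * Allocate a XIVE interrupt on this chip and map it; it is
         * used for fault notification on user space send windows (see
         * the fault window setup below).
         */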
        hwirq = xive_native_alloc_irq_on_chip(chipid);
        if (!hwirq) {
                pr_err("Inst%d: Unable to allocate global irq for chip %d\n",
                                vinst->vas_id, chipid);
                rc = -ENOENT;
                goto free_vinst;
        }

        vinst->virq = irq_create_mapping(NULL, hwirq);
        if (!vinst->virq) {
                pr_err("Inst%d: Unable to map global irq %d\n",
                                vinst->vas_id, hwirq);
                rc = -EINVAL;
                goto free_vinst;
        }

        xd = irq_get_handler_data(vinst->virq);
        if (!xd) {
                pr_err("Inst%d: Invalid virq %d\n",
                                vinst->vas_id, vinst->virq);
                rc = -EINVAL;
                goto free_vinst;
        }

        vinst->irq_port = xd->trig_page;
        pr_devel("Initialized instance [%s, %d] paste_base 0x%llx paste_win_id_shift 0x%llx IRQ %d Port 0x%llx\n",
                        pdev->name, vasid, vinst->paste_base_addr,
                        vinst->paste_win_id_shift, vinst->virq,
                        vinst->irq_port);

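        /*
         * Remember which VAS instance serves each CPU on this chip so
         * that find_vas_instance(-1) can pick the local instance.
         */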
        for_each_possible_cpu(cpu) {
                if (cpu_to_chip_id(cpu) == of_get_ibm_chip_id(dn))
                        per_cpu(cpu_vas_id, cpu) = vasid;
        }

        mutex_lock(&vas_mutex);
        list_add(&vinst->node, &vas_instances);
        mutex_unlock(&vas_mutex);

        spin_lock_init(&vinst->fault_lock);
        /*
         * IRQ and fault handling setup is needed only for user space
         * send windows.
         */
        if (vinst->virq) {
                rc = vas_irq_fault_window_setup(vinst);
                /*
                 * Fault window is used only for user space send windows.
                 * So if vinst->virq is NULL, tx_win_open returns -ENODEV
                 * for user space.
                 */
                if (rc)
                        vinst->virq = 0;
        }

        vas_instance_init_dbgdir(vinst);

        dev_set_drvdata(&pdev->dev, vinst);

        return 0;

free_vinst:
        kfree(vinst->name);
        kfree(vinst);
        return rc;
}

/*
 * Although the list of VAS instances is read/used multiple times, it
 * is written to only during initialization.
 */
struct vas_instance *find_vas_instance(int vasid)
{
        struct list_head *ent;
        struct vas_instance *vinst;

        mutex_lock(&vas_mutex);

        if (vasid == -1)
                vasid = per_cpu(cpu_vas_id, smp_processor_id());

        list_for_each(ent, &vas_instances) {
                vinst = list_entry(ent, struct vas_instance, node);
                if (vinst->vas_id == vasid) {
                        mutex_unlock(&vas_mutex);
                        return vinst;
                }
        }
        mutex_unlock(&vas_mutex);

        pr_devel("Instance %d not found\n", vasid);
        return NULL;
}

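/*
 * Return the VAS id associated with the given chip, or -1 if no CPU
 * belonging to that chip is found.
 */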
int chip_to_vas_id(int chipid)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                if (cpu_to_chip_id(cpu) == chipid)
                        return per_cpu(cpu_vas_id, cpu);
        }
        return -1;
}
EXPORT_SYMBOL(chip_to_vas_id);

static int vas_probe(struct platform_device *pdev)
{
        return init_vas_instance(pdev);
}

static const struct of_device_id powernv_vas_match[] = {
        { .compatible = "ibm,vas",},
        {},
};

static struct platform_driver vas_driver = {
        .driver = {
                .name = "vas",
                .of_match_table = powernv_vas_match,
        },
        .probe = vas_probe,
};

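/*
 * Register the VAS platform driver and create a platform device for
 * every "ibm,vas" node in the device tree. Unregisters the driver and
 * returns -ENODEV when no such node exists.
 */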
static int __init vas_init(void)
{
        int found = 0;
        struct device_node *dn;

        platform_driver_register(&vas_driver);

        for_each_compatible_node(dn, NULL, "ibm,vas") {
                of_platform_device_create(dn, NULL, NULL);
                found++;
        }

        if (!found) {
                platform_driver_unregister(&vas_driver);
                return -ENODEV;
        }

        pr_devel("Found %d instances\n", found);

        return 0;
}
device_initcall(vas_init);