linux/drivers/dca/dca-core.c
/*
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This driver supports an interface for DCA clients and providers to meet.
 */
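
/*
 * Rough usage sketch: a DCA provider driver (e.g. ioatdma) registers
 * itself with this core, and client drivers (e.g. a DCA-aware NIC
 * driver) then attach to it and look up per-CPU tags.  Only the
 * function names are the exports defined below; my_dca_ops, pdev and
 * client_pdev are placeholders.
 *
 *      provider:       dca = alloc_dca_provider(&my_dca_ops, priv_size);
 *                      err = register_dca_provider(dca, &pdev->dev);
 *
 *      client:         err = dca_add_requester(&client_pdev->dev);
 *                      cpu = get_cpu();
 *                      tag = dca3_get_tag(&client_pdev->dev, cpu);
 *                      put_cpu();
 *                      ...
 *                      dca_remove_requester(&client_pdev->dev);
 */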

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/dca.h>
#include <linux/slab.h>
#include <linux/module.h>

#define DCA_VERSION "1.12.1"

MODULE_VERSION(DCA_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

static DEFINE_RAW_SPINLOCK(dca_lock);

static LIST_HEAD(dca_domains);

static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);

/*
 * Set once an IOAT ver 3.0 provider shows up under a second PCI root
 * complex; from then on new providers are refused and the existing ones
 * are torn down (see dca_get_domain() and register_dca_provider()).
 */
static int dca_providers_blocked;
static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct pci_bus *bus = pdev->bus;

        while (bus->parent)
                bus = bus->parent;

        return bus;
}

static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
{
        struct dca_domain *domain;

        domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
        if (!domain)
                return NULL;

        INIT_LIST_HEAD(&domain->dca_providers);
        domain->pci_rc = rc;

        return domain;
}

static void dca_free_domain(struct dca_domain *domain)
{
        list_del(&domain->node);
        kfree(domain);
}

static int dca_provider_ioat_ver_3_0(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);

        return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
                ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
                (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
                (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
                (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
                (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
                (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
                (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
                (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
}

static void unregister_dca_providers(void)
{
        struct dca_provider *dca, *_dca;
        struct list_head unregistered_providers;
        struct dca_domain *domain;
        unsigned long flags;

        blocking_notifier_call_chain(&dca_provider_chain,
                                     DCA_PROVIDER_REMOVE, NULL);

        INIT_LIST_HEAD(&unregistered_providers);

        raw_spin_lock_irqsave(&dca_lock, flags);

        if (list_empty(&dca_domains)) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return;
        }

        /* at this point only one domain in the list is expected */
        domain = list_first_entry(&dca_domains, struct dca_domain, node);

        list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
                list_move(&dca->node, &unregistered_providers);

        dca_free_domain(domain);

        raw_spin_unlock_irqrestore(&dca_lock, flags);

        list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
                dca_sysfs_remove_provider(dca);
                list_del(&dca->node);
        }
}

static struct dca_domain *dca_find_domain(struct pci_bus *rc)
{
        struct dca_domain *domain;

        list_for_each_entry(domain, &dca_domains, node)
                if (domain->pci_rc == rc)
                        return domain;

        return NULL;
}

static struct dca_domain *dca_get_domain(struct device *dev)
{
        struct pci_bus *rc;
        struct dca_domain *domain;

        rc = dca_pci_rc_from_dev(dev);
        domain = dca_find_domain(rc);

        if (!domain) {
                /*
                 * An IOAT ver 3.0 device under a root complex that has no
                 * domain yet, while another domain already exists: block
                 * further providers, since DCA on such multi-root-complex
                 * systems is not supported here.
                 */
                if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
                        dca_providers_blocked = 1;
        }

        return domain;
}

static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
{
        struct dca_provider *dca;
        struct pci_bus *rc;
        struct dca_domain *domain;

        if (dev) {
                rc = dca_pci_rc_from_dev(dev);
                domain = dca_find_domain(rc);
                if (!domain)
                        return NULL;
        } else {
                if (!list_empty(&dca_domains))
                        domain = list_first_entry(&dca_domains,
                                                  struct dca_domain,
                                                  node);
                else
                        return NULL;
        }

        list_for_each_entry(dca, &domain->dca_providers, node)
                if ((!dev) || (dca->ops->dev_managed(dca, dev)))
                        return dca;

        return NULL;
}

/**
 * dca_add_requester - add a dca client to the list
 * @dev - the device that wants dca service
 */
int dca_add_requester(struct device *dev)
{
        struct dca_provider *dca;
        int err, slot = -ENODEV;
        unsigned long flags;
        struct pci_bus *pci_rc;
        struct dca_domain *domain;

        if (!dev)
                return -EFAULT;

        raw_spin_lock_irqsave(&dca_lock, flags);

        /* check if the requester has not been added already */
        dca = dca_find_provider_by_dev(dev);
        if (dca) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return -EEXIST;
        }

        pci_rc = dca_pci_rc_from_dev(dev);
        domain = dca_find_domain(pci_rc);
        if (!domain) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return -ENODEV;
        }

        list_for_each_entry(dca, &domain->dca_providers, node) {
                slot = dca->ops->add_requester(dca, dev);
                if (slot >= 0)
                        break;
        }

        raw_spin_unlock_irqrestore(&dca_lock, flags);

        if (slot < 0)
                return slot;

        err = dca_sysfs_add_req(dca, dev, slot);
        if (err) {
                raw_spin_lock_irqsave(&dca_lock, flags);
                if (dca == dca_find_provider_by_dev(dev))
                        dca->ops->remove_requester(dca, dev);
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);

/**
 * dca_remove_requester - remove a dca client from the list
 * @dev - the device that wants dca service
 */
int dca_remove_requester(struct device *dev)
{
        struct dca_provider *dca;
        int slot;
        unsigned long flags;

        if (!dev)
                return -EFAULT;

        raw_spin_lock_irqsave(&dca_lock, flags);
        dca = dca_find_provider_by_dev(dev);
        if (!dca) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return -ENODEV;
        }
        slot = dca->ops->remove_requester(dca, dev);
        raw_spin_unlock_irqrestore(&dca_lock, flags);

        if (slot < 0)
                return slot;

        dca_sysfs_remove_req(dca, slot);

        return 0;
}
EXPORT_SYMBOL_GPL(dca_remove_requester);

/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
static u8 dca_common_get_tag(struct device *dev, int cpu)
{
        struct dca_provider *dca;
        u8 tag;
        unsigned long flags;

        raw_spin_lock_irqsave(&dca_lock, flags);

        dca = dca_find_provider_by_dev(dev);
        if (!dca) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return -ENODEV;
        }
        tag = dca->ops->get_tag(dca, dev, cpu);

        raw_spin_unlock_irqrestore(&dca_lock, flags);
        return tag;
}

/**
 * dca3_get_tag - return the dca tag to the requester device
 *                for the given cpu (new api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca3_get_tag(struct device *dev, int cpu)
{
        if (!dev)
                return -EFAULT;

        return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca3_get_tag);

/**
 * dca_get_tag - return the dca tag for the given cpu (old api)
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca_get_tag(int cpu)
{
        struct device *dev = NULL;

        return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca_get_tag);
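
/*
 * Typical client-side use of the requester/tag API, loosely modelled on
 * how DCA-aware NIC drivers consume it; my_dev and my_chip_write_tag()
 * are placeholders for the client's device and hardware access:
 *
 *      if (dca_add_requester(my_dev) == 0) {
 *              int cpu = get_cpu();
 *              u8 tag = dca3_get_tag(my_dev, cpu);
 *
 *              my_chip_write_tag(tag);
 *              put_cpu();
 *      }
 *      ...
 *      dca_remove_requester(my_dev);
 */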

/**
 * alloc_dca_provider - get data struct for describing a dca provider
 * @ops - pointer to struct of dca operation function pointers
 * @priv_size - size of extra mem to be added for provider's needs
 */
struct dca_provider *alloc_dca_provider(const struct dca_ops *ops,
                                        int priv_size)
{
        struct dca_provider *dca;
        int alloc_size;

        alloc_size = (sizeof(*dca) + priv_size);
        dca = kzalloc(alloc_size, GFP_KERNEL);
        if (!dca)
                return NULL;
        dca->ops = ops;

        return dca;
}
EXPORT_SYMBOL_GPL(alloc_dca_provider);
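
/*
 * The priv_size bytes are allocated contiguously after struct
 * dca_provider itself, so a provider can keep private state there and
 * reach it through the dca_priv() helper in <linux/dca.h>.  A minimal
 * sketch, with struct my_dca_state as a placeholder:
 *
 *      dca = alloc_dca_provider(&my_dca_ops, sizeof(struct my_dca_state));
 *      if (!dca)
 *              return NULL;
 *      state = dca_priv(dca);
 */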

/**
 * free_dca_provider - release the dca provider data struct
 * @dca - struct created by alloc_dca_provider()
 */
void free_dca_provider(struct dca_provider *dca)
{
        kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);

/**
 * register_dca_provider - register a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 */
int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
        int err;
        unsigned long flags;
        struct dca_domain *domain, *newdomain = NULL;

        raw_spin_lock_irqsave(&dca_lock, flags);
        if (dca_providers_blocked) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return -ENODEV;
        }
        raw_spin_unlock_irqrestore(&dca_lock, flags);

        err = dca_sysfs_add_provider(dca, dev);
        if (err)
                return err;

        raw_spin_lock_irqsave(&dca_lock, flags);
        domain = dca_get_domain(dev);
        if (!domain) {
                struct pci_bus *rc;

                if (dca_providers_blocked) {
                        raw_spin_unlock_irqrestore(&dca_lock, flags);
                        dca_sysfs_remove_provider(dca);
                        unregister_dca_providers();
                        return -ENODEV;
                }

                raw_spin_unlock_irqrestore(&dca_lock, flags);
                rc = dca_pci_rc_from_dev(dev);
                newdomain = dca_allocate_domain(rc);
                if (!newdomain)
                        return -ENODEV;
                raw_spin_lock_irqsave(&dca_lock, flags);
                /* Recheck, we might have raced after dropping the lock */
                domain = dca_get_domain(dev);
                if (!domain) {
                        domain = newdomain;
                        newdomain = NULL;
                        list_add(&domain->node, &dca_domains);
                }
        }
        list_add(&dca->node, &domain->dca_providers);
        raw_spin_unlock_irqrestore(&dca_lock, flags);

        blocking_notifier_call_chain(&dca_provider_chain,
                                     DCA_PROVIDER_ADD, NULL);
        kfree(newdomain);
        return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);
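
/*
 * Provider-side lifecycle around register_dca_provider(); my_dca_ops and
 * its callbacks are placeholders for what a real provider such as
 * ioatdma implements:
 *
 *      static const struct dca_ops my_dca_ops = {
 *              .add_requester          = my_add_requester,
 *              .remove_requester       = my_remove_requester,
 *              .get_tag                = my_get_tag,
 *              .dev_managed            = my_dev_managed,
 *      };
 *
 *      dca = alloc_dca_provider(&my_dca_ops, sizeof(struct my_dca_state));
 *      if (!dca)
 *              return -ENOMEM;
 *      err = register_dca_provider(dca, &pdev->dev);
 *      if (err) {
 *              free_dca_provider(dca);
 *              return err;
 *      }
 *      ...
 *      unregister_dca_provider(dca, &pdev->dev);
 *      free_dca_provider(dca);
 */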

/**
 * unregister_dca_provider - remove a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 */
void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
{
        unsigned long flags;
        struct pci_bus *pci_rc;
        struct dca_domain *domain;

        blocking_notifier_call_chain(&dca_provider_chain,
                                     DCA_PROVIDER_REMOVE, NULL);

        raw_spin_lock_irqsave(&dca_lock, flags);

        if (list_empty(&dca_domains)) {
                raw_spin_unlock_irqrestore(&dca_lock, flags);
                return;
        }

        list_del(&dca->node);

        pci_rc = dca_pci_rc_from_dev(dev);
        domain = dca_find_domain(pci_rc);
        if (list_empty(&domain->dca_providers))
                dca_free_domain(domain);

        raw_spin_unlock_irqrestore(&dca_lock, flags);

        dca_sysfs_remove_provider(dca);
}
EXPORT_SYMBOL_GPL(unregister_dca_provider);

/**
 * dca_register_notify - register a client's notifier callback
 * @nb - the client's notifier block
 */
void dca_register_notify(struct notifier_block *nb)
{
        blocking_notifier_chain_register(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_register_notify);

/**
 * dca_unregister_notify - remove a client's notifier callback
 * @nb - the client's notifier block
 */
void dca_unregister_notify(struct notifier_block *nb)
{
        blocking_notifier_chain_unregister(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_unregister_notify);
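
/*
 * A client that needs to react to providers coming and going can hook
 * the notifier chain above; the callback is invoked with
 * DCA_PROVIDER_ADD or DCA_PROVIDER_REMOVE as the event.  my_dca_notify,
 * my_dca_nb and my_dev are placeholders:
 *
 *      static int my_dca_notify(struct notifier_block *nb,
 *                               unsigned long event, void *data)
 *      {
 *              if (event == DCA_PROVIDER_ADD)
 *                      dca_add_requester(my_dev);
 *              else if (event == DCA_PROVIDER_REMOVE)
 *                      dca_remove_requester(my_dev);
 *              return NOTIFY_DONE;
 *      }
 *
 *      static struct notifier_block my_dca_nb = {
 *              .notifier_call = my_dca_notify,
 *      };
 *
 *      dca_register_notify(&my_dca_nb);
 *      ...
 *      dca_unregister_notify(&my_dca_nb);
 */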

static int __init dca_init(void)
{
        pr_info("dca service started, version %s\n", DCA_VERSION);
        return dca_sysfs_init();
}

static void __exit dca_exit(void)
{
        dca_sysfs_exit();
}

arch_initcall(dca_init);
module_exit(dca_exit);