linux/drivers/crypto/qat/qat_common/adf_dev_mgr.c
/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/mutex.h>
#include <linux/list.h>
#include "adf_cfg.h"
#include "adf_common_drv.h"

static LIST_HEAD(accel_table);
static LIST_HEAD(vfs_table);
static DEFINE_MUTEX(table_lock);
static uint32_t num_devices;
static u8 id_map[ADF_MAX_DEVICES];

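/*
 * Map between a VF's PCI address (bdf) and its device ids. Reading of
 * the fields, inferred from their use below: @id is the permanently
 * allocated accel id, while @fake_id is the id exposed for enumeration
 * and is shifted as other VFs detach or re-attach so that the visible
 * ids stay contiguous. Entries with bdf == ~0 describe PFs on the host
 * (or VFs on a guest) rather than host VFs.
 */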
struct vf_id_map {
	u32 bdf;
	u32 id;
	u32 fake_id;
	bool attached;
	struct list_head list;
};

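/*
 * Derive a 0-based VF index from the VF's devfn. A worked example of the
 * arithmetic, assuming VFs occupy eight functions per slot starting at
 * slot 1: 7 * (slot - 1) + func + (slot - 1) == 8 * (slot - 1) + func,
 * so slot 1/function 0 gives VF 0 and slot 2/function 3 gives VF 11.
 */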
static int adf_get_vf_id(struct adf_accel_dev *vf)
{
	return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
		PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
		(PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
}

static int adf_get_vf_num(struct adf_accel_dev *vf)
{
	return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
}

static struct vf_id_map *adf_find_vf(u32 bdf)
{
	struct list_head *itr;

	list_for_each(itr, &vfs_table) {
		struct vf_id_map *ptr =
			list_entry(itr, struct vf_id_map, list);

		if (ptr->bdf == bdf)
			return ptr;
	}
	return NULL;
}

static int adf_get_vf_real_id(u32 fake)
{
	struct list_head *itr;

	list_for_each(itr, &vfs_table) {
		struct vf_id_map *ptr =
			list_entry(itr, struct vf_id_map, list);
		if (ptr->fake_id == fake)
			return ptr->id;
	}
	return -1;
}

/**
 * adf_clean_vf_map() - Cleans VF id mappings
 * @vf: flag indicating whether mappings are cleaned
 *      for vfs only or for vfs and pfs
 *
 * Function cleans internal ids for virtual functions.
 */
void adf_clean_vf_map(bool vf)
{
	struct vf_id_map *map;
	struct list_head *ptr, *tmp;

	mutex_lock(&table_lock);
	list_for_each_safe(ptr, tmp, &vfs_table) {
		map = list_entry(ptr, struct vf_id_map, list);
		if (map->bdf != -1) {
			id_map[map->id] = 0;
			num_devices--;
		}

		if (vf && map->bdf == -1)
			continue;

		list_del(ptr);
		kfree(map);
	}
	mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_clean_vf_map);

/**
 * adf_devmgr_update_class_index() - Update internal index
 * @hw_data:  Pointer to internal device data.
 *
 * Function updates internal dev index for VFs
 */
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
{
	struct adf_hw_device_class *class = hw_data->dev_class;
	struct list_head *itr;
	int i = 0;

	list_for_each(itr, &accel_table) {
		struct adf_accel_dev *ptr =
				list_entry(itr, struct adf_accel_dev, list);

		if (ptr->hw_device->dev_class == class)
			ptr->hw_device->instance_id = i++;

		if (i == class->instances)
			break;
	}
}
EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);

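/*
 * Claim the lowest free slot in id_map. Note the out-of-range sentinel:
 * on exhaustion this returns ADF_MAX_DEVICES + 1, which callers detect
 * with an `id > ADF_MAX_DEVICES` check rather than a negative errno.
 */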
static unsigned int adf_find_free_id(void)
{
	unsigned int i;

	for (i = 0; i < ADF_MAX_DEVICES; i++) {
		if (!id_map[i]) {
			id_map[i] = 1;
			return i;
		}
	}
	return ADF_MAX_DEVICES + 1;
}

/**
 * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
 * @accel_dev:  Pointer to acceleration device.
 * @pf:         Corresponding PF if the accel_dev is a VF
 *
 * Function adds acceleration device to the acceleration framework.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
		       struct adf_accel_dev *pf)
{
	struct list_head *itr;
	int ret = 0;

	if (num_devices == ADF_MAX_DEVICES) {
		dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
			ADF_MAX_DEVICES);
		return -EFAULT;
	}

	mutex_lock(&table_lock);
	atomic_set(&accel_dev->ref_count, 0);

	/* PF on host or VF on guest */
	if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
		struct vf_id_map *map;

		list_for_each(itr, &accel_table) {
			struct adf_accel_dev *ptr =
				list_entry(itr, struct adf_accel_dev, list);

			if (ptr == accel_dev) {
				ret = -EEXIST;
				goto unlock;
			}
		}

		list_add_tail(&accel_dev->list, &accel_table);
		accel_dev->accel_id = adf_find_free_id();
		if (accel_dev->accel_id > ADF_MAX_DEVICES) {
			ret = -EFAULT;
			goto unlock;
		}
		num_devices++;
		map = kzalloc(sizeof(*map), GFP_KERNEL);
		if (!map) {
			ret = -ENOMEM;
			goto unlock;
		}
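		/*
		 * bdf == ~0 flags a PF (or guest VF) entry; host VFs get a
		 * real bdf in the branch below.
		 */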
		map->bdf = ~0;
		map->id = accel_dev->accel_id;
		map->fake_id = map->id;
		map->attached = true;
		list_add_tail(&map->list, &vfs_table);
	} else if (accel_dev->is_vf && pf) {
		/* VF on host */
		struct vf_id_map *map;

		map = adf_find_vf(adf_get_vf_num(accel_dev));
		if (map) {
			struct vf_id_map *next;

			accel_dev->accel_id = map->id;
			list_add_tail(&accel_dev->list, &accel_table);
			map->fake_id++;
			map->attached = true;
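			/*
			 * Re-attaching shifts the user-visible (fake) ids of
			 * all later entries up by one so enumeration stays
			 * contiguous; adf_devmgr_rm_dev() shifts them back.
			 */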
			next = list_next_entry(map, list);
			while (next && &next->list != &vfs_table) {
				next->fake_id++;
				next = list_next_entry(next, list);
			}

			ret = 0;
			goto unlock;
		}

		map = kzalloc(sizeof(*map), GFP_KERNEL);
		if (!map) {
			ret = -ENOMEM;
			goto unlock;
		}
		accel_dev->accel_id = adf_find_free_id();
		if (accel_dev->accel_id > ADF_MAX_DEVICES) {
			kfree(map);
			ret = -EFAULT;
			goto unlock;
		}
		num_devices++;
		list_add_tail(&accel_dev->list, &accel_table);
		map->bdf = adf_get_vf_num(accel_dev);
		map->id = accel_dev->accel_id;
		map->fake_id = map->id;
		map->attached = true;
		list_add_tail(&map->list, &vfs_table);
	}
unlock:
	mutex_unlock(&table_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);

struct list_head *adf_devmgr_get_head(void)
{
	return &accel_table;
}

/**
 * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
 * @accel_dev:  Pointer to acceleration device.
 * @pf:         Corresponding PF if the accel_dev is a VF
 *
 * Function removes acceleration device from the acceleration framework.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
		       struct adf_accel_dev *pf)
{
	mutex_lock(&table_lock);
	if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
		id_map[accel_dev->accel_id] = 0;
		num_devices--;
	} else if (accel_dev->is_vf && pf) {
		struct vf_id_map *map, *next;

		map = adf_find_vf(adf_get_vf_num(accel_dev));
		if (!map) {
			dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
			goto unlock;
		}
		map->fake_id--;
		map->attached = false;
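		/* Shift later entries' fake ids back down; see adf_devmgr_add_dev() */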
		next = list_next_entry(map, list);
		while (next && &next->list != &vfs_table) {
			next->fake_id--;
			next = list_next_entry(next, list);
		}
	}
unlock:
	list_del(&accel_dev->list);
	mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);

struct adf_accel_dev *adf_devmgr_get_first(void)
{
	struct adf_accel_dev *dev = NULL;

	if (!list_empty(&accel_table))
		dev = list_first_entry(&accel_table, struct adf_accel_dev,
				       list);
	return dev;
}

/**
 * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
 * @pci_dev:  Pointer to pci device.
 *
 * Function returns acceleration device associated with the given pci device.
 * To be used by QAT device specific drivers.
 *
 * Return: pointer to accel_dev or NULL if not found.
 */
struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
{
	struct list_head *itr;

	mutex_lock(&table_lock);
	list_for_each(itr, &accel_table) {
		struct adf_accel_dev *ptr =
				list_entry(itr, struct adf_accel_dev, list);

		if (ptr->accel_pci_dev.pci_dev == pci_dev) {
			mutex_unlock(&table_lock);
			return ptr;
		}
	}
	mutex_unlock(&table_lock);
	return NULL;
}
EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);

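/*
 * Note: the id argument is interpreted as a user-visible (fake) id and
 * is first translated to the stable internal id via adf_get_vf_real_id().
 */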
struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
{
	struct list_head *itr;
	int real_id;

	mutex_lock(&table_lock);
	real_id = adf_get_vf_real_id(id);
	if (real_id < 0)
		goto unlock;

	id = real_id;

	list_for_each(itr, &accel_table) {
		struct adf_accel_dev *ptr =
				list_entry(itr, struct adf_accel_dev, list);
		if (ptr->accel_id == id) {
			mutex_unlock(&table_lock);
			return ptr;
		}
	}
unlock:
	mutex_unlock(&table_lock);
	return NULL;
}

int adf_devmgr_verify_id(uint32_t id)
{
	if (id == ADF_CFG_ALL_DEVICES)
		return 0;

	if (adf_devmgr_get_dev_by_id(id))
		return 0;

	return -ENODEV;
}

static int adf_get_num_detached_vfs(void)
{
	struct list_head *itr;
	int vfs = 0;

	mutex_lock(&table_lock);
	list_for_each(itr, &vfs_table) {
		struct vf_id_map *ptr =
			list_entry(itr, struct vf_id_map, list);
		if (ptr->bdf != ~0 && !ptr->attached)
			vfs++;
	}
	mutex_unlock(&table_lock);
	return vfs;
}

void adf_devmgr_get_num_dev(uint32_t *num)
{
	*num = num_devices - adf_get_num_detached_vfs();
}

/**
 * adf_dev_in_use() - Check whether accel_dev is currently in use
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when device is in use, 0 otherwise.
 */
int adf_dev_in_use(struct adf_accel_dev *accel_dev)
{
	return atomic_read(&accel_dev->ref_count) != 0;
}
EXPORT_SYMBOL_GPL(adf_dev_in_use);

/**
 * adf_dev_get() - Increment accel_dev reference count
 * @accel_dev: Pointer to acceleration device.
 *
 * Increment the accel_dev refcount. If this is the first reference,
 * i.e. the device transitions from unused to in use, take a reference
 * on the owning module as well.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 when successful, -EFAULT when it fails to bump the module refcount
 */
int adf_dev_get(struct adf_accel_dev *accel_dev)
{
	if (atomic_add_return(1, &accel_dev->ref_count) == 1)
		if (!try_module_get(accel_dev->owner))
			return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(adf_dev_get);

/**
 * adf_dev_put() - Decrement accel_dev reference count
 * @accel_dev: Pointer to acceleration device.
 *
 * Decrement the accel_dev refcount. When the last reference is dropped,
 * i.e. the device transitions back to unused, release the reference on
 * the owning module as well.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_dev_put(struct adf_accel_dev *accel_dev)
{
	if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
		module_put(accel_dev->owner);
}
EXPORT_SYMBOL_GPL(adf_dev_put);
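
/*
 * Illustrative get/put pairing (hypothetical caller, not part of this
 * file; do_work() is a placeholder): hold a reference while the device
 * is used and drop it on every exit path.
 *
 *	if (adf_dev_get(accel_dev))
 *		return -EFAULT;
 *	ret = do_work(accel_dev);
 *	adf_dev_put(accel_dev);
 *	return ret;
 */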

/**
 * adf_devmgr_in_reset() - Check whether device is in reset
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when the device is being reset, 0 otherwise.
 */
int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
{
	return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
}
EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);

/**
 * adf_dev_started() - Check whether device has started
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when the device has started, 0 otherwise.
 */
int adf_dev_started(struct adf_accel_dev *accel_dev)
{
	return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
}
EXPORT_SYMBOL_GPL(adf_dev_started);