linux/drivers/s390/crypto/zcrypt_api.c
   1/*
   2 *  zcrypt 2.1.0
   3 *
   4 *  Copyright IBM Corp. 2001, 2012
   5 *  Author(s): Robert Burroughs
   6 *             Eric Rossman (edrossma@us.ibm.com)
   7 *             Cornelia Huck <cornelia.huck@de.ibm.com>
   8 *
   9 *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  10 *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  11 *                                Ralph Wuerthner <rwuerthn@de.ibm.com>
  12 *  MSGTYPE restruct:             Holger Dengler <hd@linux.vnet.ibm.com>
  13 *
  14 * This program is free software; you can redistribute it and/or modify
  15 * it under the terms of the GNU General Public License as published by
  16 * the Free Software Foundation; either version 2, or (at your option)
  17 * any later version.
  18 *
  19 * This program is distributed in the hope that it will be useful,
  20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  22 * GNU General Public License for more details.
  23 *
  24 * You should have received a copy of the GNU General Public License
  25 * along with this program; if not, write to the Free Software
  26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  27 */
  28
  29#include <linux/module.h>
  30#include <linux/init.h>
  31#include <linux/interrupt.h>
  32#include <linux/miscdevice.h>
  33#include <linux/fs.h>
  34#include <linux/proc_fs.h>
  35#include <linux/seq_file.h>
  36#include <linux/compat.h>
  37#include <linux/slab.h>
  38#include <linux/atomic.h>
  39#include <asm/uaccess.h>
  40#include <linux/hw_random.h>
  41#include <linux/debugfs.h>
  42#include <asm/debug.h>
  43
  44#include "zcrypt_debug.h"
  45#include "zcrypt_api.h"
  46
  47#include "zcrypt_msgtype6.h"
  48
  49/*
  50 * Module description.
  51 */
  52MODULE_AUTHOR("IBM Corporation");
  53MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
  54                   "Copyright IBM Corp. 2001, 2012");
  55MODULE_LICENSE("GPL");
  56
  57static int zcrypt_hwrng_seed = 1;
  58module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP);
  59MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");
  60
  61static DEFINE_SPINLOCK(zcrypt_device_lock);
  62static LIST_HEAD(zcrypt_device_list);
  63static int zcrypt_device_count = 0;
  64static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
  65static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);
  66
  67atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
  68EXPORT_SYMBOL(zcrypt_rescan_req);
  69
  70static int zcrypt_rng_device_add(void);
  71static void zcrypt_rng_device_remove(void);
  72
  73static DEFINE_SPINLOCK(zcrypt_ops_list_lock);
  74static LIST_HEAD(zcrypt_ops_list);
  75
  76static debug_info_t *zcrypt_dbf_common;
  77static debug_info_t *zcrypt_dbf_devices;
  78static struct dentry *debugfs_root;
  79
  80/*
  81 * Device attributes common for all crypto devices.
  82 */
  83static ssize_t zcrypt_type_show(struct device *dev,
  84                                struct device_attribute *attr, char *buf)
  85{
  86        struct zcrypt_device *zdev = to_ap_dev(dev)->private;
  87        return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string);
  88}
  89
  90static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL);
  91
  92static ssize_t zcrypt_online_show(struct device *dev,
  93                                  struct device_attribute *attr, char *buf)
  94{
  95        struct zcrypt_device *zdev = to_ap_dev(dev)->private;
  96        return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online);
  97}
  98
  99static ssize_t zcrypt_online_store(struct device *dev,
 100                                   struct device_attribute *attr,
 101                                   const char *buf, size_t count)
 102{
 103        struct zcrypt_device *zdev = to_ap_dev(dev)->private;
 104        int online;
 105
 106        if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
 107                return -EINVAL;
 108        zdev->online = online;
 109        ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dman", zdev->ap_dev->qid,
 110                       zdev->online);
 111        if (!online)
 112                ap_flush_queue(zdev->ap_dev);
 113        return count;
 114}
 115
 116static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store);
 117
 118static struct attribute *zcrypt_device_attrs[] = {
 119        &dev_attr_type.attr,
 120        &dev_attr_online.attr,
 121        NULL,
 122};
 123
 124static struct attribute_group zcrypt_device_attr_group = {
 125        .attrs = zcrypt_device_attrs,
 126};
 127
 128/**
 129 * zcrypt_process_rescan(): Process a rescan of the transport layer.
 130 *
 131 * Returns 1 if the rescan has been processed, otherwise 0.
 132 */
 133static inline int zcrypt_process_rescan(void)
 134{
 135        if (atomic_read(&zcrypt_rescan_req)) {
 136                atomic_set(&zcrypt_rescan_req, 0);
 137                atomic_inc(&zcrypt_rescan_count);
 138                ap_bus_force_rescan();
 139                ZCRYPT_DBF_COMMON(DBF_INFO, "rescan%07d",
 140                                  atomic_read(&zcrypt_rescan_count));
 141                return 1;
 142        }
 143        return 0;
 144}
 145
 146/**
 147 * __zcrypt_increase_preference(): Increase preference of a crypto device.
 148 * @zdev: Pointer to the crypto device
 149 *
 150 * Move the device towards the head of the device list.
 151 * Needs to be called while holding the zcrypt device list lock.
 152 * Note: cards with speed_rating of 0 are kept at the end of the list.
 153 */
 154static void __zcrypt_increase_preference(struct zcrypt_device *zdev)
 155{
 156        struct zcrypt_device *tmp;
 157        struct list_head *l;
 158
 159        if (zdev->speed_rating == 0)
 160                return;
 161        for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) {
 162                tmp = list_entry(l, struct zcrypt_device, list);
 163                if ((tmp->request_count + 1) * tmp->speed_rating <=
 164                    (zdev->request_count + 1) * zdev->speed_rating &&
 165                    tmp->speed_rating != 0)
 166                        break;
 167        }
 168        if (l == zdev->list.prev)
 169                return;
 170        /* Move zdev behind l */
 171        list_move(&zdev->list, l);
 172}
 173
 174/**
 175 * __zcrypt_decrease_preference(): Decrease preference of a crypto device.
 176 * @zdev: Pointer to a crypto device.
 177 *
 178 * Move the device towards the tail of the device list.
 179 * Needs to be called while holding the zcrypt device list lock.
 180 * Note: cards with speed_rating of 0 are kept at the end of the list.
 181 */
 182static void __zcrypt_decrease_preference(struct zcrypt_device *zdev)
 183{
 184        struct zcrypt_device *tmp;
 185        struct list_head *l;
 186
 187        if (zdev->speed_rating == 0)
 188                return;
 189        for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) {
 190                tmp = list_entry(l, struct zcrypt_device, list);
 191                if ((tmp->request_count + 1) * tmp->speed_rating >
 192                    (zdev->request_count + 1) * zdev->speed_rating ||
 193                    tmp->speed_rating == 0)
 194                        break;
 195        }
 196        if (l == zdev->list.next)
 197                return;
 198        /* Move zdev before l */
 199        list_move_tail(&zdev->list, l);
 200}
 201
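/*
 * Worked example for the two helpers above (illustrative numbers, not
 * actual card ratings): the list is kept sorted by the weight
 * (request_count + 1) * speed_rating, smallest first, where a smaller
 * speed_rating means a faster card.  An idle card with speed_rating 10
 * weighs (0 + 1) * 10 = 10 and is therefore preferred over a card with
 * speed_rating 4 that already has two requests outstanding, since
 * (2 + 1) * 4 = 12.  The dispatch loops below pick the first eligible
 * entry, so the head of the list is tried first; cards with a
 * speed_rating of 0 never take part in the ordering and stay at the
 * tail.
 */
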
 202static void zcrypt_device_release(struct kref *kref)
 203{
 204        struct zcrypt_device *zdev =
 205                container_of(kref, struct zcrypt_device, refcount);
 206        zcrypt_device_free(zdev);
 207}
 208
 209void zcrypt_device_get(struct zcrypt_device *zdev)
 210{
 211        kref_get(&zdev->refcount);
 212}
 213EXPORT_SYMBOL(zcrypt_device_get);
 214
 215int zcrypt_device_put(struct zcrypt_device *zdev)
 216{
 217        return kref_put(&zdev->refcount, zcrypt_device_release);
 218}
 219EXPORT_SYMBOL(zcrypt_device_put);
 220
 221struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size)
 222{
 223        struct zcrypt_device *zdev;
 224
 225        zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL);
 226        if (!zdev)
 227                return NULL;
 228        zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL);
 229        if (!zdev->reply.message)
 230                goto out_free;
 231        zdev->reply.length = max_response_size;
 232        spin_lock_init(&zdev->lock);
 233        INIT_LIST_HEAD(&zdev->list);
 234        zdev->dbf_area = zcrypt_dbf_devices;
 235        return zdev;
 236
 237out_free:
 238        kfree(zdev);
 239        return NULL;
 240}
 241EXPORT_SYMBOL(zcrypt_device_alloc);
 242
 243void zcrypt_device_free(struct zcrypt_device *zdev)
 244{
 245        kfree(zdev->reply.message);
 246        kfree(zdev);
 247}
 248EXPORT_SYMBOL(zcrypt_device_free);
 249
 250/**
 251 * zcrypt_device_register() - Register a crypto device.
 252 * @zdev: Pointer to a crypto device
 253 *
 254 * Register a crypto device. Returns 0 if successful.
 255 */
 256int zcrypt_device_register(struct zcrypt_device *zdev)
 257{
 258        int rc;
 259
 260        if (!zdev->ops)
 261                return -ENODEV;
 262        rc = sysfs_create_group(&zdev->ap_dev->device.kobj,
 263                                &zcrypt_device_attr_group);
 264        if (rc)
 265                goto out;
 266        get_device(&zdev->ap_dev->device);
 267        kref_init(&zdev->refcount);
 268        spin_lock_bh(&zcrypt_device_lock);
 269        zdev->online = 1;       /* New devices are online by default. */
 270        ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dreg", zdev->ap_dev->qid,
 271                       zdev->online);
 272        list_add_tail(&zdev->list, &zcrypt_device_list);
 273        __zcrypt_increase_preference(zdev);
 274        zcrypt_device_count++;
 275        spin_unlock_bh(&zcrypt_device_lock);
 276        if (zdev->ops->rng) {
 277                rc = zcrypt_rng_device_add();
 278                if (rc)
 279                        goto out_unregister;
 280        }
 281        return 0;
 282
 283out_unregister:
 284        spin_lock_bh(&zcrypt_device_lock);
 285        zcrypt_device_count--;
 286        list_del_init(&zdev->list);
 287        spin_unlock_bh(&zcrypt_device_lock);
 288        sysfs_remove_group(&zdev->ap_dev->device.kobj,
 289                           &zcrypt_device_attr_group);
 290        put_device(&zdev->ap_dev->device);
 291        zcrypt_device_put(zdev);
 292out:
 293        return rc;
 294}
 295EXPORT_SYMBOL(zcrypt_device_register);
 296
 297/**
 298 * zcrypt_device_unregister(): Unregister a crypto device.
 299 * @zdev: Pointer to crypto device
 300 *
 301 * Unregister a crypto device.
 302 */
 303void zcrypt_device_unregister(struct zcrypt_device *zdev)
 304{
 305        if (zdev->ops->rng)
 306                zcrypt_rng_device_remove();
 307        spin_lock_bh(&zcrypt_device_lock);
 308        zcrypt_device_count--;
 309        list_del_init(&zdev->list);
 310        spin_unlock_bh(&zcrypt_device_lock);
 311        sysfs_remove_group(&zdev->ap_dev->device.kobj,
 312                           &zcrypt_device_attr_group);
 313        put_device(&zdev->ap_dev->device);
 314        zcrypt_device_put(zdev);
 315}
 316EXPORT_SYMBOL(zcrypt_device_unregister);
 317
 318void zcrypt_msgtype_register(struct zcrypt_ops *zops)
 319{
 320        spin_lock_bh(&zcrypt_ops_list_lock);
 321        list_add_tail(&zops->list, &zcrypt_ops_list);
 322        spin_unlock_bh(&zcrypt_ops_list_lock);
 323}
 324EXPORT_SYMBOL(zcrypt_msgtype_register);
 325
 326void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
 327{
 328        spin_lock_bh(&zcrypt_ops_list_lock);
 329        list_del_init(&zops->list);
 330        spin_unlock_bh(&zcrypt_ops_list_lock);
 331}
 332EXPORT_SYMBOL(zcrypt_msgtype_unregister);
 333
 334static inline
 335struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant)
 336{
 337        struct zcrypt_ops *zops;
 338        int found = 0;
 339
 340        spin_lock_bh(&zcrypt_ops_list_lock);
 341        list_for_each_entry(zops, &zcrypt_ops_list, list) {
 342                if ((zops->variant == variant) &&
 343                    (!strncmp(zops->name, name, sizeof(zops->name)))) {
 344                        found = 1;
 345                        break;
 346                }
 347        }
 348        if (!found || !try_module_get(zops->owner))
 349                zops = NULL;
 350
 351        spin_unlock_bh(&zcrypt_ops_list_lock);
 352
 353        return zops;
 354}
 355
 356struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *name, int variant)
 357{
 358        struct zcrypt_ops *zops = NULL;
 359
 360        zops = __ops_lookup(name, variant);
 361        if (!zops) {
 362                request_module("%s", name);
 363                zops = __ops_lookup(name, variant);
 364        }
 365        return zops;
 366}
 367EXPORT_SYMBOL(zcrypt_msgtype_request);
 368
 369void zcrypt_msgtype_release(struct zcrypt_ops *zops)
 370{
 371        if (zops)
 372                module_put(zops->owner);
 373}
 374EXPORT_SYMBOL(zcrypt_msgtype_release);
 375
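/*
 * Usage sketch for the device and msgtype interfaces exported above
 * (illustrative only, not part of this file): a card driver would wire
 * them together in its ap_driver probe callback roughly as below.
 * EXAMPLE_MAX_RESPONSE_SIZE is a placeholder, the msgtype constants are
 * assumed to come from the included zcrypt_msgtype6.h, and a real
 * driver would also fill in type_string, user_space_type, min_mod_size,
 * max_mod_size and speed_rating from its card tables.
 *
 *   static int example_probe(struct ap_device *ap_dev)
 *   {
 *           struct zcrypt_device *zdev;
 *           int rc;
 *
 *           zdev = zcrypt_device_alloc(EXAMPLE_MAX_RESPONSE_SIZE);
 *           if (!zdev)
 *                   return -ENOMEM;
 *           zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
 *                           MSGTYPE06_VARIANT_DEFAULT);
 *           if (!zdev->ops) {
 *                   zcrypt_device_free(zdev);
 *                   return -ENODEV;
 *           }
 *           zdev->ap_dev = ap_dev;
 *           ap_dev->reply = &zdev->reply;
 *           ap_dev->private = zdev;
 *           rc = zcrypt_device_register(zdev);
 *           if (rc) {
 *                   ap_dev->private = NULL;
 *                   zcrypt_msgtype_release(zdev->ops);
 *                   zcrypt_device_free(zdev);
 *           }
 *           return rc;
 *   }
 *
 * Teardown mirrors this: zcrypt_device_unregister() drops the final
 * reference and frees the device, after which the driver only has to
 * call zcrypt_msgtype_release() on the ops it requested here.
 */
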
 376/**
 377 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
 378 *
 379 * This function is not supported beyond zcrypt 1.3.1.
 380 */
 381static ssize_t zcrypt_read(struct file *filp, char __user *buf,
 382                           size_t count, loff_t *f_pos)
 383{
 384        return -EPERM;
 385}
 386
 387/**
 388 * zcrypt_write(): Not allowed.
 389 *
 390 * Write is not allowed.
 391 */
 392static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
 393                            size_t count, loff_t *f_pos)
 394{
 395        return -EPERM;
 396}
 397
 398/**
 399 * zcrypt_open(): Count number of users.
 400 *
 401 * Device open function to count number of users.
 402 */
 403static int zcrypt_open(struct inode *inode, struct file *filp)
 404{
 405        atomic_inc(&zcrypt_open_count);
 406        return nonseekable_open(inode, filp);
 407}
 408
 409/**
 410 * zcrypt_release(): Count number of users.
 411 *
 412 * Device close function to count number of users.
 413 */
 414static int zcrypt_release(struct inode *inode, struct file *filp)
 415{
 416        atomic_dec(&zcrypt_open_count);
 417        return 0;
 418}
 419
 420/*
 421 * zcrypt ioctls.
 422 */
 423static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
 424{
 425        struct zcrypt_device *zdev;
 426        int rc;
 427
 428        if (mex->outputdatalength < mex->inputdatalength)
 429                return -EINVAL;
 430        /*
 431         * As long as outputdatalength is big enough, we can set the
 432         * outputdatalength equal to the inputdatalength, since that is the
 433         * number of bytes we will copy in any case
 434         */
 435        mex->outputdatalength = mex->inputdatalength;
 436
 437        spin_lock_bh(&zcrypt_device_lock);
 438        list_for_each_entry(zdev, &zcrypt_device_list, list) {
 439                if (!zdev->online ||
 440                    !zdev->ops->rsa_modexpo ||
 441                    zdev->min_mod_size > mex->inputdatalength ||
 442                    zdev->max_mod_size < mex->inputdatalength)
 443                        continue;
 444                zcrypt_device_get(zdev);
 445                get_device(&zdev->ap_dev->device);
 446                zdev->request_count++;
 447                __zcrypt_decrease_preference(zdev);
 448                if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
 449                        spin_unlock_bh(&zcrypt_device_lock);
 450                        rc = zdev->ops->rsa_modexpo(zdev, mex);
 451                        spin_lock_bh(&zcrypt_device_lock);
 452                        module_put(zdev->ap_dev->drv->driver.owner);
 453                } else {
 454                        rc = -EAGAIN;
 455                }
 456                zdev->request_count--;
 457                __zcrypt_increase_preference(zdev);
 458                put_device(&zdev->ap_dev->device);
 459                zcrypt_device_put(zdev);
 460                spin_unlock_bh(&zcrypt_device_lock);
 461                return rc;
 462        }
 463        spin_unlock_bh(&zcrypt_device_lock);
 464        return -ENODEV;
 465}
 466
 467static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
 468{
 469        struct zcrypt_device *zdev;
 470        unsigned long long z1, z2, z3;
 471        int rc, copied;
 472
 473        if (crt->outputdatalength < crt->inputdatalength)
 474                return -EINVAL;
 475        /*
 476         * As long as outputdatalength is big enough, we can set the
 477         * outputdatalength equal to the inputdatalength, since that is the
 478         * number of bytes we will copy in any case
 479         */
 480        crt->outputdatalength = crt->inputdatalength;
 481
 482        copied = 0;
 483 restart:
 484        spin_lock_bh(&zcrypt_device_lock);
 485        list_for_each_entry(zdev, &zcrypt_device_list, list) {
 486                if (!zdev->online ||
 487                    !zdev->ops->rsa_modexpo_crt ||
 488                    zdev->min_mod_size > crt->inputdatalength ||
 489                    zdev->max_mod_size < crt->inputdatalength)
 490                        continue;
 491                if (zdev->short_crt && crt->inputdatalength > 240) {
 492                        /*
 493                         * Check inputdata for leading zeros for cards
 494                         * that can't handle np_prime, bp_key, or
 495                         * u_mult_inv > 128 bytes.
 496                         */
 497                        if (copied == 0) {
 498                                unsigned int len;
 499                                spin_unlock_bh(&zcrypt_device_lock);
 500                                /* len is at most 256 / 2 - 120 = 8.
 501                                 * For larger inputs just assume a leading
 502                                 * zero length of 8, as stated in the
 503                                 * ica_rsa_modexpo_crt requirements in zcrypt.h.
 504                                 */
 505                                if (crt->inputdatalength <= 256)
 506                                        len = crt->inputdatalength / 2 - 120;
 507                                else
 508                                        len = 8;
 509                                if (len > sizeof(z1))
 510                                        return -EFAULT;
 511                                z1 = z2 = z3 = 0;
 512                                if (copy_from_user(&z1, crt->np_prime, len) ||
 513                                    copy_from_user(&z2, crt->bp_key, len) ||
 514                                    copy_from_user(&z3, crt->u_mult_inv, len))
 515                                        return -EFAULT;
 517                                copied = 1;
 518                                /*
 519                                 * We have to restart device lookup -
 520                                 * the device list may have changed by now.
 521                                 */
 522                                goto restart;
 523                        }
 524                        if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL)
 525                                /* The device can't handle this request. */
 526                                continue;
 527                }
 528                zcrypt_device_get(zdev);
 529                get_device(&zdev->ap_dev->device);
 530                zdev->request_count++;
 531                __zcrypt_decrease_preference(zdev);
 532                if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
 533                        spin_unlock_bh(&zcrypt_device_lock);
 534                        rc = zdev->ops->rsa_modexpo_crt(zdev, crt);
 535                        spin_lock_bh(&zcrypt_device_lock);
 536                        module_put(zdev->ap_dev->drv->driver.owner);
 537                } else {
 538                        rc = -EAGAIN;
 539                }
 540                zdev->request_count--;
 541                __zcrypt_increase_preference(zdev);
 542                put_device(&zdev->ap_dev->device);
 543                zcrypt_device_put(zdev);
 544                spin_unlock_bh(&zcrypt_device_lock);
 545                return rc;
 546        }
 547        spin_unlock_bh(&zcrypt_device_lock);
 548        return -ENODEV;
 549}
 550
 551static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
 552{
 553        struct zcrypt_device *zdev;
 554        int rc;
 555
 556        spin_lock_bh(&zcrypt_device_lock);
 557        list_for_each_entry(zdev, &zcrypt_device_list, list) {
 558                if (!zdev->online || !zdev->ops->send_cprb ||
 559                   (zdev->ops->variant == MSGTYPE06_VARIANT_EP11) ||
 560                   (xcRB->user_defined != AUTOSELECT &&
 561                    AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined))
 562                        continue;
 563                zcrypt_device_get(zdev);
 564                get_device(&zdev->ap_dev->device);
 565                zdev->request_count++;
 566                __zcrypt_decrease_preference(zdev);
 567                if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
 568                        spin_unlock_bh(&zcrypt_device_lock);
 569                        rc = zdev->ops->send_cprb(zdev, xcRB);
 570                        spin_lock_bh(&zcrypt_device_lock);
 571                        module_put(zdev->ap_dev->drv->driver.owner);
 572                } else {
 573                        rc = -EAGAIN;
 574                }
 575                zdev->request_count--;
 576                __zcrypt_increase_preference(zdev);
 577                put_device(&zdev->ap_dev->device);
 578                zcrypt_device_put(zdev);
 579                spin_unlock_bh(&zcrypt_device_lock);
 580                return rc;
 581        }
 582        spin_unlock_bh(&zcrypt_device_lock);
 583        return -ENODEV;
 584}
 585
 586struct ep11_target_dev_list {
 587        unsigned short          targets_num;
 588        struct ep11_target_dev  *targets;
 589};
 590
 591static bool is_desired_ep11dev(unsigned int dev_qid,
 592                               struct ep11_target_dev_list dev_list)
 593{
 594        int n;
 595
 596        for (n = 0; n < dev_list.targets_num; n++, dev_list.targets++) {
 597                if ((AP_QID_DEVICE(dev_qid) == dev_list.targets->ap_id) &&
 598                    (AP_QID_QUEUE(dev_qid) == dev_list.targets->dom_id)) {
 599                        return true;
 600                }
 601        }
 602        return false;
 603}
 604
 605static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
 606{
 607        struct zcrypt_device *zdev;
 608        bool autoselect = false;
 609        int rc;
 610        struct ep11_target_dev_list ep11_dev_list = {
 611                .targets_num    =  0x00,
 612                .targets        =  NULL,
 613        };
 614
 615        ep11_dev_list.targets_num = (unsigned short) xcrb->targets_num;
 616
 617        /* empty list indicates autoselect (all available targets) */
 618        if (ep11_dev_list.targets_num == 0)
 619                autoselect = true;
 620        else {
 621                ep11_dev_list.targets = kcalloc((unsigned short)
 622                                                xcrb->targets_num,
 623                                                sizeof(struct ep11_target_dev),
 624                                                GFP_KERNEL);
 625                if (!ep11_dev_list.targets)
 626                        return -ENOMEM;
 627
 628                if (copy_from_user(ep11_dev_list.targets,
 629                                   (struct ep11_target_dev __force __user *)
 630                                   xcrb->targets, xcrb->targets_num *
 631                                   sizeof(struct ep11_target_dev)))
 632                        return -EFAULT;
 633        }
 634
 635        spin_lock_bh(&zcrypt_device_lock);
 636        list_for_each_entry(zdev, &zcrypt_device_list, list) {
 637                /* check if device is eligible */
 638                if (!zdev->online ||
 639                    zdev->ops->variant != MSGTYPE06_VARIANT_EP11)
 640                        continue;
 641
 642                /* check if device is selected as valid target */
 643                if (!is_desired_ep11dev(zdev->ap_dev->qid, ep11_dev_list) &&
 644                    !autoselect)
 645                        continue;
 646
 647                zcrypt_device_get(zdev);
 648                get_device(&zdev->ap_dev->device);
 649                zdev->request_count++;
 650                __zcrypt_decrease_preference(zdev);
 651                if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
 652                        spin_unlock_bh(&zcrypt_device_lock);
 653                        rc = zdev->ops->send_ep11_cprb(zdev, xcrb);
 654                        spin_lock_bh(&zcrypt_device_lock);
 655                        module_put(zdev->ap_dev->drv->driver.owner);
 656                } else {
 657                        rc = -EAGAIN;
 658                }
 659                zdev->request_count--;
 660                __zcrypt_increase_preference(zdev);
 661                put_device(&zdev->ap_dev->device);
 662                zcrypt_device_put(zdev);
 663                spin_unlock_bh(&zcrypt_device_lock);
 664                return rc;
 665        }
 666        spin_unlock_bh(&zcrypt_device_lock);
 667        return -ENODEV;
 668}
 669
 670static long zcrypt_rng(char *buffer)
 671{
 672        struct zcrypt_device *zdev;
 673        int rc;
 674
 675        spin_lock_bh(&zcrypt_device_lock);
 676        list_for_each_entry(zdev, &zcrypt_device_list, list) {
 677                if (!zdev->online || !zdev->ops->rng)
 678                        continue;
 679                zcrypt_device_get(zdev);
 680                get_device(&zdev->ap_dev->device);
 681                zdev->request_count++;
 682                __zcrypt_decrease_preference(zdev);
 683                if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
 684                        spin_unlock_bh(&zcrypt_device_lock);
 685                        rc = zdev->ops->rng(zdev, buffer);
 686                        spin_lock_bh(&zcrypt_device_lock);
 687                        module_put(zdev->ap_dev->drv->driver.owner);
 688                } else
 689                        rc = -EAGAIN;
 690                zdev->request_count--;
 691                __zcrypt_increase_preference(zdev);
 692                put_device(&zdev->ap_dev->device);
 693                zcrypt_device_put(zdev);
 694                spin_unlock_bh(&zcrypt_device_lock);
 695                return rc;
 696        }
 697        spin_unlock_bh(&zcrypt_device_lock);
 698        return -ENODEV;
 699}
 700
 701static void zcrypt_status_mask(char status[AP_DEVICES])
 702{
 703        struct zcrypt_device *zdev;
 704
 705        memset(status, 0, sizeof(char) * AP_DEVICES);
 706        spin_lock_bh(&zcrypt_device_lock);
 707        list_for_each_entry(zdev, &zcrypt_device_list, list)
 708                status[AP_QID_DEVICE(zdev->ap_dev->qid)] =
 709                        zdev->online ? zdev->user_space_type : 0x0d;
 710        spin_unlock_bh(&zcrypt_device_lock);
 711}
 712
 713static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
 714{
 715        struct zcrypt_device *zdev;
 716
 717        memset(qdepth, 0, sizeof(char)  * AP_DEVICES);
 718        spin_lock_bh(&zcrypt_device_lock);
 719        list_for_each_entry(zdev, &zcrypt_device_list, list) {
 720                spin_lock(&zdev->ap_dev->lock);
 721                qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] =
 722                        zdev->ap_dev->pendingq_count +
 723                        zdev->ap_dev->requestq_count;
 724                spin_unlock(&zdev->ap_dev->lock);
 725        }
 726        spin_unlock_bh(&zcrypt_device_lock);
 727}
 728
 729static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
 730{
 731        struct zcrypt_device *zdev;
 732
 733        memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
 734        spin_lock_bh(&zcrypt_device_lock);
 735        list_for_each_entry(zdev, &zcrypt_device_list, list) {
 736                spin_lock(&zdev->ap_dev->lock);
 737                reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] =
 738                        zdev->ap_dev->total_request_count;
 739                spin_unlock(&zdev->ap_dev->lock);
 740        }
 741        spin_unlock_bh(&zcrypt_device_lock);
 742}
 743
 744static int zcrypt_pendingq_count(void)
 745{
 746        struct zcrypt_device *zdev;
 747        int pendingq_count = 0;
 748
 749        spin_lock_bh(&zcrypt_device_lock);
 750        list_for_each_entry(zdev, &zcrypt_device_list, list) {
 751                spin_lock(&zdev->ap_dev->lock);
 752                pendingq_count += zdev->ap_dev->pendingq_count;
 753                spin_unlock(&zdev->ap_dev->lock);
 754        }
 755        spin_unlock_bh(&zcrypt_device_lock);
 756        return pendingq_count;
 757}
 758
 759static int zcrypt_requestq_count(void)
 760{
 761        struct zcrypt_device *zdev;
 762        int requestq_count = 0;
 763
 764        spin_lock_bh(&zcrypt_device_lock);
 765        list_for_each_entry(zdev, &zcrypt_device_list, list) {
 766                spin_lock(&zdev->ap_dev->lock);
 767                requestq_count += zdev->ap_dev->requestq_count;
 768                spin_unlock(&zdev->ap_dev->lock);
 769        }
 770        spin_unlock_bh(&zcrypt_device_lock);
 771        return requestq_count;
 772}
 773
 774static int zcrypt_count_type(int type)
 775{
 776        struct zcrypt_device *zdev;
 777        int device_count = 0;
 778
 779        spin_lock_bh(&zcrypt_device_lock);
 780        list_for_each_entry(zdev, &zcrypt_device_list, list)
 781                if (zdev->user_space_type == type)
 782                        device_count++;
 783        spin_unlock_bh(&zcrypt_device_lock);
 784        return device_count;
 785}
 786
 787/**
 788 * zcrypt_ica_status(): Old, deprecated combi status call.
 789 *
 790 * Old, deprecated combi status call.
 791 */
 792static long zcrypt_ica_status(struct file *filp, unsigned long arg)
 793{
 794        struct ica_z90_status *pstat;
 795        int ret;
 796
 797        pstat = kzalloc(sizeof(*pstat), GFP_KERNEL);
 798        if (!pstat)
 799                return -ENOMEM;
 800        pstat->totalcount = zcrypt_device_count;
 801        pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA);
 802        pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC);
 803        pstat->requestqWaitCount = zcrypt_requestq_count();
 804        pstat->pendingqWaitCount = zcrypt_pendingq_count();
 805        pstat->totalOpenCount = atomic_read(&zcrypt_open_count);
 806        pstat->cryptoDomain = ap_domain_index;
 807        zcrypt_status_mask(pstat->status);
 808        zcrypt_qdepth_mask(pstat->qdepth);
 809        ret = 0;
 810        if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat)))
 811                ret = -EFAULT;
 812        kfree(pstat);
 813        return ret;
 814}
 815
 816static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
 817                                  unsigned long arg)
 818{
 819        int rc;
 820
 821        switch (cmd) {
 822        case ICARSAMODEXPO: {
 823                struct ica_rsa_modexpo __user *umex = (void __user *) arg;
 824                struct ica_rsa_modexpo mex;
 825                if (copy_from_user(&mex, umex, sizeof(mex)))
 826                        return -EFAULT;
 827                do {
 828                        rc = zcrypt_rsa_modexpo(&mex);
 829                } while (rc == -EAGAIN);
 830                /* on failure: retry once again after a requested rescan */
 831                if ((rc == -ENODEV) && (zcrypt_process_rescan()))
 832                        do {
 833                                rc = zcrypt_rsa_modexpo(&mex);
 834                        } while (rc == -EAGAIN);
 835                if (rc)
 836                        return rc;
 837                return put_user(mex.outputdatalength, &umex->outputdatalength);
 838        }
 839        case ICARSACRT: {
 840                struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
 841                struct ica_rsa_modexpo_crt crt;
 842                if (copy_from_user(&crt, ucrt, sizeof(crt)))
 843                        return -EFAULT;
 844                do {
 845                        rc = zcrypt_rsa_crt(&crt);
 846                } while (rc == -EAGAIN);
 847                /* on failure: retry once again after a requested rescan */
 848                if ((rc == -ENODEV) && (zcrypt_process_rescan()))
 849                        do {
 850                                rc = zcrypt_rsa_crt(&crt);
 851                        } while (rc == -EAGAIN);
 852                if (rc)
 853                        return rc;
 854                return put_user(crt.outputdatalength, &ucrt->outputdatalength);
 855        }
 856        case ZSECSENDCPRB: {
 857                struct ica_xcRB __user *uxcRB = (void __user *) arg;
 858                struct ica_xcRB xcRB;
 859                if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
 860                        return -EFAULT;
 861                do {
 862                        rc = zcrypt_send_cprb(&xcRB);
 863                } while (rc == -EAGAIN);
 864                /* on failure: retry once again after a requested rescan */
 865                if ((rc == -ENODEV) && (zcrypt_process_rescan()))
 866                        do {
 867                                rc = zcrypt_send_cprb(&xcRB);
 868                        } while (rc == -EAGAIN);
 869                if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
 870                        return -EFAULT;
 871                return rc;
 872        }
 873        case ZSENDEP11CPRB: {
 874                struct ep11_urb __user *uxcrb = (void __user *)arg;
 875                struct ep11_urb xcrb;
 876                if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
 877                        return -EFAULT;
 878                do {
 879                        rc = zcrypt_send_ep11_cprb(&xcrb);
 880                } while (rc == -EAGAIN);
 881                /* on failure: retry once again after a requested rescan */
 882                if ((rc == -ENODEV) && (zcrypt_process_rescan()))
 883                        do {
 884                                rc = zcrypt_send_ep11_cprb(&xcrb);
 885                        } while (rc == -EAGAIN);
 886                if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
 887                        return -EFAULT;
 888                return rc;
 889        }
 890        case Z90STAT_STATUS_MASK: {
 891                char status[AP_DEVICES];
 892                zcrypt_status_mask(status);
 893                if (copy_to_user((char __user *) arg, status,
 894                                 sizeof(char) * AP_DEVICES))
 895                        return -EFAULT;
 896                return 0;
 897        }
 898        case Z90STAT_QDEPTH_MASK: {
 899                char qdepth[AP_DEVICES];
 900                zcrypt_qdepth_mask(qdepth);
 901                if (copy_to_user((char __user *) arg, qdepth,
 902                                 sizeof(char) * AP_DEVICES))
 903                        return -EFAULT;
 904                return 0;
 905        }
 906        case Z90STAT_PERDEV_REQCNT: {
 907                int reqcnt[AP_DEVICES];
 908                zcrypt_perdev_reqcnt(reqcnt);
 909                if (copy_to_user((int __user *) arg, reqcnt,
 910                                 sizeof(int) * AP_DEVICES))
 911                        return -EFAULT;
 912                return 0;
 913        }
 914        case Z90STAT_REQUESTQ_COUNT:
 915                return put_user(zcrypt_requestq_count(), (int __user *) arg);
 916        case Z90STAT_PENDINGQ_COUNT:
 917                return put_user(zcrypt_pendingq_count(), (int __user *) arg);
 918        case Z90STAT_TOTALOPEN_COUNT:
 919                return put_user(atomic_read(&zcrypt_open_count),
 920                                (int __user *) arg);
 921        case Z90STAT_DOMAIN_INDEX:
 922                return put_user(ap_domain_index, (int __user *) arg);
 923        /*
 924         * Deprecated ioctls. Don't add another device count ioctl;
 925         * the devices can be counted in user space using the
 926         * output of the Z90STAT_STATUS_MASK ioctl.
 927         */
 928        case ICAZ90STATUS:
 929                return zcrypt_ica_status(filp, arg);
 930        case Z90STAT_TOTALCOUNT:
 931                return put_user(zcrypt_device_count, (int __user *) arg);
 932        case Z90STAT_PCICACOUNT:
 933                return put_user(zcrypt_count_type(ZCRYPT_PCICA),
 934                                (int __user *) arg);
 935        case Z90STAT_PCICCCOUNT:
 936                return put_user(zcrypt_count_type(ZCRYPT_PCICC),
 937                                (int __user *) arg);
 938        case Z90STAT_PCIXCCMCL2COUNT:
 939                return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2),
 940                                (int __user *) arg);
 941        case Z90STAT_PCIXCCMCL3COUNT:
 942                return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
 943                                (int __user *) arg);
 944        case Z90STAT_PCIXCCCOUNT:
 945                return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) +
 946                                zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
 947                                (int __user *) arg);
 948        case Z90STAT_CEX2CCOUNT:
 949                return put_user(zcrypt_count_type(ZCRYPT_CEX2C),
 950                                (int __user *) arg);
 951        case Z90STAT_CEX2ACOUNT:
 952                return put_user(zcrypt_count_type(ZCRYPT_CEX2A),
 953                                (int __user *) arg);
 954        default:
 955                /* unknown ioctl number */
 956                return -ENOIOCTLCMD;
 957        }
 958}
 959
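/*
 * Userspace sketch (illustrative only): how a caller would drive the
 * ICARSAMODEXPO case handled above.  It assumes the ica_rsa_modexpo
 * structure and the ioctl number from the uapi zcrypt header and
 * caller-provided buffers of modlen bytes each; error handling is kept
 * to the bare minimum.
 *
 *   #include <fcntl.h>
 *   #include <unistd.h>
 *   #include <sys/ioctl.h>
 *   #include <asm/zcrypt.h>
 *
 *   int example_rsa_modexpo(char *in, char *out, char *b_key,
 *                           char *n_modulus, unsigned int modlen)
 *   {
 *           struct ica_rsa_modexpo mex = {
 *                   .inputdata        = in,
 *                   .inputdatalength  = modlen,
 *                   .outputdata       = out,
 *                   .outputdatalength = modlen,
 *                   .b_key            = b_key,
 *                   .n_modulus        = n_modulus,
 *           };
 *           int fd, rc;
 *
 *           fd = open("/dev/z90crypt", O_RDWR);
 *           if (fd < 0)
 *                   return -1;
 *           rc = ioctl(fd, ICARSAMODEXPO, &mex);
 *           close(fd);
 *           return rc;
 *   }
 *
 * On success the driver writes the updated mex.outputdatalength back to
 * the caller, as done by the put_user() at the end of the ICARSAMODEXPO
 * case above.
 */
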
 960#ifdef CONFIG_COMPAT
 961/*
 962 * ioctl32 conversion routines
 963 */
 964struct compat_ica_rsa_modexpo {
 965        compat_uptr_t   inputdata;
 966        unsigned int    inputdatalength;
 967        compat_uptr_t   outputdata;
 968        unsigned int    outputdatalength;
 969        compat_uptr_t   b_key;
 970        compat_uptr_t   n_modulus;
 971};
 972
 973static long trans_modexpo32(struct file *filp, unsigned int cmd,
 974                            unsigned long arg)
 975{
 976        struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
 977        struct compat_ica_rsa_modexpo mex32;
 978        struct ica_rsa_modexpo mex64;
 979        long rc;
 980
 981        if (copy_from_user(&mex32, umex32, sizeof(mex32)))
 982                return -EFAULT;
 983        mex64.inputdata = compat_ptr(mex32.inputdata);
 984        mex64.inputdatalength = mex32.inputdatalength;
 985        mex64.outputdata = compat_ptr(mex32.outputdata);
 986        mex64.outputdatalength = mex32.outputdatalength;
 987        mex64.b_key = compat_ptr(mex32.b_key);
 988        mex64.n_modulus = compat_ptr(mex32.n_modulus);
 989        do {
 990                rc = zcrypt_rsa_modexpo(&mex64);
 991        } while (rc == -EAGAIN);
 992        /* on failure: retry once again after a requested rescan */
 993        if ((rc == -ENODEV) && (zcrypt_process_rescan()))
 994                do {
 995                        rc = zcrypt_rsa_modexpo(&mex64);
 996                } while (rc == -EAGAIN);
 997        if (rc)
 998                return rc;
 999        return put_user(mex64.outputdatalength,
1000                        &umex32->outputdatalength);
1001}
1002
1003struct compat_ica_rsa_modexpo_crt {
1004        compat_uptr_t   inputdata;
1005        unsigned int    inputdatalength;
1006        compat_uptr_t   outputdata;
1007        unsigned int    outputdatalength;
1008        compat_uptr_t   bp_key;
1009        compat_uptr_t   bq_key;
1010        compat_uptr_t   np_prime;
1011        compat_uptr_t   nq_prime;
1012        compat_uptr_t   u_mult_inv;
1013};
1014
1015static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
1016                                unsigned long arg)
1017{
1018        struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
1019        struct compat_ica_rsa_modexpo_crt crt32;
1020        struct ica_rsa_modexpo_crt crt64;
1021        long rc;
1022
1023        if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
1024                return -EFAULT;
1025        crt64.inputdata = compat_ptr(crt32.inputdata);
1026        crt64.inputdatalength = crt32.inputdatalength;
1027        crt64.outputdata = compat_ptr(crt32.outputdata);
1028        crt64.outputdatalength = crt32.outputdatalength;
1029        crt64.bp_key = compat_ptr(crt32.bp_key);
1030        crt64.bq_key = compat_ptr(crt32.bq_key);
1031        crt64.np_prime = compat_ptr(crt32.np_prime);
1032        crt64.nq_prime = compat_ptr(crt32.nq_prime);
1033        crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
1034        do {
1035                rc = zcrypt_rsa_crt(&crt64);
1036        } while (rc == -EAGAIN);
1037        /* on failure: retry once again after a requested rescan */
1038        if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1039                do {
1040                        rc = zcrypt_rsa_crt(&crt64);
1041                } while (rc == -EAGAIN);
1042        if (rc)
1043                return rc;
1044        return put_user(crt64.outputdatalength,
1045                        &ucrt32->outputdatalength);
1046}
1047
1048struct compat_ica_xcRB {
1049        unsigned short  agent_ID;
1050        unsigned int    user_defined;
1051        unsigned short  request_ID;
1052        unsigned int    request_control_blk_length;
1053        unsigned char   padding1[16 - sizeof (compat_uptr_t)];
1054        compat_uptr_t   request_control_blk_addr;
1055        unsigned int    request_data_length;
1056        char            padding2[16 - sizeof (compat_uptr_t)];
1057        compat_uptr_t   request_data_address;
1058        unsigned int    reply_control_blk_length;
1059        char            padding3[16 - sizeof (compat_uptr_t)];
1060        compat_uptr_t   reply_control_blk_addr;
1061        unsigned int    reply_data_length;
1062        char            padding4[16 - sizeof (compat_uptr_t)];
1063        compat_uptr_t   reply_data_addr;
1064        unsigned short  priority_window;
1065        unsigned int    status;
1066} __attribute__((packed));
1067
1068static long trans_xcRB32(struct file *filp, unsigned int cmd,
1069                         unsigned long arg)
1070{
1071        struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
1072        struct compat_ica_xcRB xcRB32;
1073        struct ica_xcRB xcRB64;
1074        long rc;
1075
1076        if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
1077                return -EFAULT;
1078        xcRB64.agent_ID = xcRB32.agent_ID;
1079        xcRB64.user_defined = xcRB32.user_defined;
1080        xcRB64.request_ID = xcRB32.request_ID;
1081        xcRB64.request_control_blk_length =
1082                xcRB32.request_control_blk_length;
1083        xcRB64.request_control_blk_addr =
1084                compat_ptr(xcRB32.request_control_blk_addr);
1085        xcRB64.request_data_length =
1086                xcRB32.request_data_length;
1087        xcRB64.request_data_address =
1088                compat_ptr(xcRB32.request_data_address);
1089        xcRB64.reply_control_blk_length =
1090                xcRB32.reply_control_blk_length;
1091        xcRB64.reply_control_blk_addr =
1092                compat_ptr(xcRB32.reply_control_blk_addr);
1093        xcRB64.reply_data_length = xcRB32.reply_data_length;
1094        xcRB64.reply_data_addr =
1095                compat_ptr(xcRB32.reply_data_addr);
1096        xcRB64.priority_window = xcRB32.priority_window;
1097        xcRB64.status = xcRB32.status;
1098        do {
1099                rc = zcrypt_send_cprb(&xcRB64);
1100        } while (rc == -EAGAIN);
1101        /* on failure: retry once again after a requested rescan */
1102        if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1103                do {
1104                        rc = zcrypt_send_cprb(&xcRB64);
1105                } while (rc == -EAGAIN);
1106        xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
1107        xcRB32.reply_data_length = xcRB64.reply_data_length;
1108        xcRB32.status = xcRB64.status;
1109        if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
1110                return -EFAULT;
1111        return rc;
1112}
1113
1114static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
1115                         unsigned long arg)
1116{
1117        if (cmd == ICARSAMODEXPO)
1118                return trans_modexpo32(filp, cmd, arg);
1119        if (cmd == ICARSACRT)
1120                return trans_modexpo_crt32(filp, cmd, arg);
1121        if (cmd == ZSECSENDCPRB)
1122                return trans_xcRB32(filp, cmd, arg);
1123        return zcrypt_unlocked_ioctl(filp, cmd, arg);
1124}
1125#endif
1126
1127/*
1128 * Misc device file operations.
1129 */
1130static const struct file_operations zcrypt_fops = {
1131        .owner          = THIS_MODULE,
1132        .read           = zcrypt_read,
1133        .write          = zcrypt_write,
1134        .unlocked_ioctl = zcrypt_unlocked_ioctl,
1135#ifdef CONFIG_COMPAT
1136        .compat_ioctl   = zcrypt_compat_ioctl,
1137#endif
1138        .open           = zcrypt_open,
1139        .release        = zcrypt_release,
1140        .llseek         = no_llseek,
1141};
1142
1143/*
1144 * Misc device.
1145 */
1146static struct miscdevice zcrypt_misc_device = {
1147        .minor      = MISC_DYNAMIC_MINOR,
1148        .name       = "z90crypt",
1149        .fops       = &zcrypt_fops,
1150};
1151
1152/*
1153 * Deprecated /proc entry support.
1154 */
1155static struct proc_dir_entry *zcrypt_entry;
1156
1157static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len)
1158{
1159        int i;
1160
1161        for (i = 0; i < len; i++)
1162                seq_printf(m, "%01x", (unsigned int) addr[i]);
1163        seq_putc(m, ' ');
1164}
1165
1166static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len)
1167{
1168        int inl, c, cx;
1169
1170        seq_printf(m, "    ");
1171        inl = 0;
1172        for (c = 0; c < (len / 16); c++) {
1173                sprintcl(m, addr+inl, 16);
1174                inl += 16;
1175        }
1176        cx = len%16;
1177        if (cx) {
1178                sprintcl(m, addr+inl, cx);
1179                inl += cx;
1180        }
1181        seq_putc(m, '\n');
1182}
1183
1184static void sprinthx(unsigned char *title, struct seq_file *m,
1185                     unsigned char *addr, unsigned int len)
1186{
1187        int inl, r, rx;
1188
1189        seq_printf(m, "\n%s\n", title);
1190        inl = 0;
1191        for (r = 0; r < (len / 64); r++) {
1192                sprintrw(m, addr+inl, 64);
1193                inl += 64;
1194        }
1195        rx = len % 64;
1196        if (rx) {
1197                sprintrw(m, addr+inl, rx);
1198                inl += rx;
1199        }
1200        seq_putc(m, '\n');
1201}
1202
1203static void sprinthx4(unsigned char *title, struct seq_file *m,
1204                      unsigned int *array, unsigned int len)
1205{
1206        seq_printf(m, "\n%s\n", title);
1207        seq_hex_dump(m, "    ", DUMP_PREFIX_NONE, 32, 4, array, len, false);
1208        seq_putc(m, '\n');
1209}
1210
1211static int zcrypt_proc_show(struct seq_file *m, void *v)
1212{
1213        char workarea[sizeof(int) * AP_DEVICES];
1214
1215        seq_printf(m, "\nzcrypt version: %d.%d.%d\n",
1216                   ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
1217        seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index);
1218        seq_printf(m, "Total device count: %d\n", zcrypt_device_count);
1219        seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA));
1220        seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC));
1221        seq_printf(m, "PCIXCC MCL2 count: %d\n",
1222                   zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
1223        seq_printf(m, "PCIXCC MCL3 count: %d\n",
1224                   zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
1225        seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C));
1226        seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A));
1227        seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C));
1228        seq_printf(m, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A));
1229        seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count());
1230        seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count());
1231        seq_printf(m, "Total open handles: %d\n\n",
1232                   atomic_read(&zcrypt_open_count));
1233        zcrypt_status_mask(workarea);
1234        sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
1235                 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A",
1236                 m, workarea, AP_DEVICES);
1237        zcrypt_qdepth_mask(workarea);
1238        sprinthx("Waiting work element counts", m, workarea, AP_DEVICES);
1239        zcrypt_perdev_reqcnt((int *) workarea);
1240        sprinthx4("Per-device successfully completed request counts",
1241                  m, (unsigned int *) workarea, AP_DEVICES);
1242        return 0;
1243}
1244
1245static int zcrypt_proc_open(struct inode *inode, struct file *file)
1246{
1247        return single_open(file, zcrypt_proc_show, NULL);
1248}
1249
1250static void zcrypt_disable_card(int index)
1251{
1252        struct zcrypt_device *zdev;
1253
1254        spin_lock_bh(&zcrypt_device_lock);
1255        list_for_each_entry(zdev, &zcrypt_device_list, list)
1256                if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
1257                        zdev->online = 0;
1258                        ap_flush_queue(zdev->ap_dev);
1259                        break;
1260                }
1261        spin_unlock_bh(&zcrypt_device_lock);
1262}
1263
1264static void zcrypt_enable_card(int index)
1265{
1266        struct zcrypt_device *zdev;
1267
1268        spin_lock_bh(&zcrypt_device_lock);
1269        list_for_each_entry(zdev, &zcrypt_device_list, list)
1270                if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
1271                        zdev->online = 1;
1272                        break;
1273                }
1274        spin_unlock_bh(&zcrypt_device_lock);
1275}
1276
1277static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer,
1278                                 size_t count, loff_t *pos)
1279{
1280        unsigned char *lbuf, *ptr;
1281        size_t local_count;
1282        int j;
1283
1284        if (count <= 0)
1285                return 0;
1286
1287#define LBUFSIZE 1200UL
1288        lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
1289        if (!lbuf)
1290                return 0;
1291
1292        local_count = min(LBUFSIZE - 1, count);
1293        if (copy_from_user(lbuf, buffer, local_count) != 0) {
1294                kfree(lbuf);
1295                return -EFAULT;
1296        }
1297        lbuf[local_count] = '\0';
1298
1299        ptr = strstr(lbuf, "Online devices");
1300        if (!ptr)
1301                goto out;
1302        ptr = strstr(ptr, "\n");
1303        if (!ptr)
1304                goto out;
1305        ptr++;
1306
1307        if (strstr(ptr, "Waiting work element counts") == NULL)
1308                goto out;
1309
1310        for (j = 0; j < 64 && *ptr; ptr++) {
1311                /*
1312                 * '0' for no device, '1' for PCICA, '2' for PCICC,
1313                 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
1314                 * '5' for CEX2C and '6' for CEX2A,
1315                 * '7' for CEX3C and '8' for CEX3A
1316                 */
1317                if (*ptr >= '0' && *ptr <= '8')
1318                        j++;
1319                else if (*ptr == 'd' || *ptr == 'D')
1320                        zcrypt_disable_card(j++);
1321                else if (*ptr == 'e' || *ptr == 'E')
1322                        zcrypt_enable_card(j++);
1323                else if (*ptr != ' ' && *ptr != '\t')
1324                        break;
1325        }
1326out:
1327        kfree(lbuf);
1328        return count;
1329}
1330
1331static const struct file_operations zcrypt_proc_fops = {
1332        .owner          = THIS_MODULE,
1333        .open           = zcrypt_proc_open,
1334        .read           = seq_read,
1335        .llseek         = seq_lseek,
1336        .release        = single_release,
1337        .write          = zcrypt_proc_write,
1338};
1339
1340static int zcrypt_rng_device_count;
1341static u32 *zcrypt_rng_buffer;
1342static int zcrypt_rng_buffer_index;
1343static DEFINE_MUTEX(zcrypt_rng_mutex);
1344
1345static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
1346{
1347        int rc;
1348
1349        /*
1350         * We don't need locking here because the RNG API guarantees serialized
1351         * read method calls.
1352         */
1353        if (zcrypt_rng_buffer_index == 0) {
1354                rc = zcrypt_rng((char *) zcrypt_rng_buffer);
1355                /* on failure: retry once again after a requested rescan */
1356                if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1357                        rc = zcrypt_rng((char *) zcrypt_rng_buffer);
1358                if (rc < 0)
1359                        return -EIO;
1360                zcrypt_rng_buffer_index = rc / sizeof *data;
1361        }
1362        *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
1363        return sizeof *data;
1364}
1365
1366static struct hwrng zcrypt_rng_dev = {
1367        .name           = "zcrypt",
1368        .data_read      = zcrypt_rng_data_read,
1369        .quality        = 990,
1370};
1371
1372static int zcrypt_rng_device_add(void)
1373{
1374        int rc = 0;
1375
1376        mutex_lock(&zcrypt_rng_mutex);
1377        if (zcrypt_rng_device_count == 0) {
1378                zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
1379                if (!zcrypt_rng_buffer) {
1380                        rc = -ENOMEM;
1381                        goto out;
1382                }
1383                zcrypt_rng_buffer_index = 0;
1384                if (!zcrypt_hwrng_seed)
1385                        zcrypt_rng_dev.quality = 0;
1386                rc = hwrng_register(&zcrypt_rng_dev);
1387                if (rc)
1388                        goto out_free;
1389                zcrypt_rng_device_count = 1;
1390        } else
1391                zcrypt_rng_device_count++;
1392        mutex_unlock(&zcrypt_rng_mutex);
1393        return 0;
1394
1395out_free:
1396        free_page((unsigned long) zcrypt_rng_buffer);
1397out:
1398        mutex_unlock(&zcrypt_rng_mutex);
1399        return rc;
1400}
1401
1402static void zcrypt_rng_device_remove(void)
1403{
1404        mutex_lock(&zcrypt_rng_mutex);
1405        zcrypt_rng_device_count--;
1406        if (zcrypt_rng_device_count == 0) {
1407                hwrng_unregister(&zcrypt_rng_dev);
1408                free_page((unsigned long) zcrypt_rng_buffer);
1409        }
1410        mutex_unlock(&zcrypt_rng_mutex);
1411}
1412
1413int __init zcrypt_debug_init(void)
1414{
1415        debugfs_root = debugfs_create_dir("zcrypt", NULL);
1416
1417        zcrypt_dbf_common = debug_register("zcrypt_common", 1, 1, 16);
1418        debug_register_view(zcrypt_dbf_common, &debug_hex_ascii_view);
1419        debug_set_level(zcrypt_dbf_common, DBF_ERR);
1420
1421        zcrypt_dbf_devices = debug_register("zcrypt_devices", 1, 1, 16);
1422        debug_register_view(zcrypt_dbf_devices, &debug_hex_ascii_view);
1423        debug_set_level(zcrypt_dbf_devices, DBF_ERR);
1424
1425        return 0;
1426}
1427
1428void zcrypt_debug_exit(void)
1429{
1430        debugfs_remove(debugfs_root);
1431        if (zcrypt_dbf_common)
1432                debug_unregister(zcrypt_dbf_common);
1433        if (zcrypt_dbf_devices)
1434                debug_unregister(zcrypt_dbf_devices);
1435}
1436
1437/**
1438 * zcrypt_api_init(): Module initialization.
1439 *
1440 * The module initialization code.
1441 */
1442int __init zcrypt_api_init(void)
1443{
1444        int rc;
1445
1446        rc = zcrypt_debug_init();
1447        if (rc)
1448                goto out;
1449
1450        atomic_set(&zcrypt_rescan_req, 0);
1451
1452        /* Register the request sprayer. */
1453        rc = misc_register(&zcrypt_misc_device);
1454        if (rc < 0)
1455                goto out;
1456
1457        /* Set up the proc file system */
1458        zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, &zcrypt_proc_fops);
1459        if (!zcrypt_entry) {
1460                rc = -ENOMEM;
1461                goto out_misc;
1462        }
1463
1464        return 0;
1465
1466out_misc:
1467        misc_deregister(&zcrypt_misc_device);
1468out:
1469        return rc;
1470}
1471
1472/**
1473 * zcrypt_api_exit(): Module termination.
1474 *
1475 * The module termination code.
1476 */
1477void zcrypt_api_exit(void)
1478{
1479        remove_proc_entry("driver/z90crypt", NULL);
1480        misc_deregister(&zcrypt_misc_device);
1481        zcrypt_debug_exit();
1482}
1483
1484module_init(zcrypt_api_init);
1485module_exit(zcrypt_api_exit);
1486