linux/drivers/crypto/chelsio/chcr_core.c
/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (C) 2011-2016 Chelsio Communications.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written and Maintained by:
 * Manoj Malviya (manojmalviya@chelsio.com)
 * Atul Gupta (atul.gupta@chelsio.com)
 * Jitendra Lulla (jlulla@chelsio.com)
 * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 * Harsh Jain (harsh@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>

#include <crypto/aes.h>
#include <crypto/hash.h>

#include "t4_msg.h"
#include "chcr_core.h"
#include "cxgb4_uld.h"

static struct chcr_driver_data drv_data;

typedef int (*chcr_handler_func)(struct adapter *adap, unsigned char *input);
static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);

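/*
 * Per-opcode dispatch table for CPL messages delivered to this ULD.
 * Only CPL_FW6_PLD (crypto completions from firmware) is handled here;
 * chcr_uld_rx_handler() indexes this table by the CPL opcode.
 */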
static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_FW6_PLD] = cpl_fw6_pld_handler,
};

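/*
 * Upper Layer Driver (ULD) hooks handed to cxgb4 at registration time
 * (see chcr_crypto_init()): device add, adapter state changes and
 * receive processing for the crypto queues.
 */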
static struct cxgb4_uld_info chcr_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	/* Max ntxq will be derived from fw config file */
	.rxq_size = 1024,
	.add = chcr_uld_add,
	.state_change = chcr_uld_state_change,
	.rx_handler = chcr_uld_rx_handler,
};

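/*
 * Delayed work run while a device is being detached: poll the in-flight
 * request count and reschedule (up to WQ_RETRY times) until it drains,
 * then signal chcr_detach_device() through detach_comp.
 */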
static void detach_work_fn(struct work_struct *work)
{
	struct chcr_dev *dev;

	dev = container_of(work, struct chcr_dev, detach_work.work);

	if (atomic_read(&dev->inflight)) {
		dev->wqretry--;
		if (dev->wqretry) {
			pr_debug("Request inflight count %d\n",
				 atomic_read(&dev->inflight));

			schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
		} else {
			WARN(1, "CHCR: %d requests still pending\n",
			     atomic_read(&dev->inflight));
			complete(&dev->detach_comp);
		}
	} else {
		complete(&dev->detach_comp);
	}
}

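/*
 * Pick a device for a new crypto session. A minimal usage sketch,
 * assuming a caller along the lines of the transform setup code in
 * chcr_algo.c (names below are illustrative only):
 *
 *	struct uld_ctx *u_ctx = assign_chcr_device();
 *
 *	if (!u_ctx)
 *		return -ENXIO;		// no active device available
 *	ctx->dev = &u_ctx->dev;		// keep this device for the session
 */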
struct uld_ctx *assign_chcr_device(void)
{
	struct uld_ctx *u_ctx = NULL;

	/*
	 * When multiple devices are present in the system, select a
	 * device in round-robin fashion for crypto operations. A given
	 * session must keep using the same device to maintain
	 * request-response ordering.
	 */
	mutex_lock(&drv_data.drv_mutex);
	if (!list_empty(&drv_data.act_dev)) {
		u_ctx = drv_data.last_dev;
		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
						  struct uld_ctx, entry);
		else
			drv_data.last_dev =
				list_next_entry(drv_data.last_dev, entry);
	}
	mutex_unlock(&drv_data.drv_mutex);
	return u_ctx;
}

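/*
 * Device lifecycle: chcr_dev_init() places a newly added device on the
 * inact_dev list; chcr_dev_add() moves it to act_dev once the adapter is
 * up (CXGB4_STATE_UP); chcr_dev_move() returns it to inact_dev on detach,
 * fixing up the round-robin cursor (last_dev) if it pointed at it.
 */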
static void chcr_dev_add(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = &u_ctx->dev;
	dev->state = CHCR_ATTACH;
	atomic_set(&dev->inflight, 0);
	mutex_lock(&drv_data.drv_mutex);
	list_move(&u_ctx->entry, &drv_data.act_dev);
	if (!drv_data.last_dev)
		drv_data.last_dev = u_ctx;
	mutex_unlock(&drv_data.drv_mutex);
}

static void chcr_dev_init(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = &u_ctx->dev;
	spin_lock_init(&dev->lock_chcr_dev);
	INIT_DELAYED_WORK(&dev->detach_work, detach_work_fn);
	init_completion(&dev->detach_comp);
	dev->state = CHCR_INIT;
	dev->wqretry = WQ_RETRY;
	atomic_inc(&drv_data.dev_count);
	atomic_set(&dev->inflight, 0);
	mutex_lock(&drv_data.drv_mutex);
	list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
	mutex_unlock(&drv_data.drv_mutex);
}

static int chcr_dev_move(struct uld_ctx *u_ctx)
{
	mutex_lock(&drv_data.drv_mutex);
	if (drv_data.last_dev == u_ctx) {
		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
						  struct uld_ctx, entry);
		else
			drv_data.last_dev =
				list_next_entry(drv_data.last_dev, entry);
	}
	list_move(&u_ctx->entry, &drv_data.inact_dev);
	if (list_empty(&drv_data.act_dev))
		drv_data.last_dev = NULL;
	atomic_dec(&drv_data.dev_count);
	mutex_unlock(&drv_data.drv_mutex);

	return 0;
}

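/*
 * Completion handler for CPL_FW6_PLD: the firmware echoes back the
 * crypto_async_request pointer in data[1] and reports MAC/pad errors in
 * data[0]; the original request is completed via chcr_handle_resp().
 */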
static int cpl_fw6_pld_handler(struct adapter *adap,
			       unsigned char *input)
{
	struct crypto_async_request *req;
	struct cpl_fw6_pld *fw6_pld;
	u32 ack_err_status = 0;
	int error_status = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
						    fw6_pld->data[1]);

	ack_err_status =
		ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
	if (CHK_MAC_ERR_BIT(ack_err_status) || CHK_PAD_ERR_BIT(ack_err_status))
		error_status = -EBADMSG;
	/* Complete the original request, passing along any error status */
	if (req) {
		error_status = chcr_handle_resp(req, input, error_status);
	} else {
		pr_err("Incorrect request address from the firmware\n");
		return -EFAULT;
	}
	if (error_status)
		atomic_inc(&adap->chcr_stats.error);

	return 0;
}

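/* Hand a fully built work-request skb to the LLD's crypto transmit path */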
int chcr_send_wr(struct sk_buff *skb)
{
	return cxgb4_crypto_send(skb->dev, skb);
}

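/*
 * ULD "add" callback, invoked by cxgb4 for each adapter. Allocate the
 * per-adapter uld_ctx and queue it on the inactive list; the device only
 * becomes usable once CXGB4_STATE_UP is reported.
 */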
static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
{
	struct uld_ctx *u_ctx;

	pr_info_once("%s\n", DRV_DESC);
	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
		return ERR_PTR(-EOPNOTSUPP);

	/* Create the device and add it to the device list */
	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
	if (!u_ctx) {
		u_ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	u_ctx->lldi = *lld;
	chcr_dev_init(u_ctx);
out:
	return u_ctx;
}

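/*
 * ULD receive handler: CPL messages from the crypto ingress queues land
 * here. The payload either follows the response descriptor directly
 * (rsp[1]) or, for larger messages, sits in the packet gather list.
 */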
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
			const struct pkt_gl *pgl)
{
	struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
	struct chcr_dev *dev = &u_ctx->dev;
	struct adapter *adap = padap(dev);
	const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;

	if (!work_handlers[rpl->opcode]) {
		pr_err("Unsupported opcode %d received\n", rpl->opcode);
		return 0;
	}

	if (!pgl)
		work_handlers[rpl->opcode](adap, (unsigned char *)&rsp[1]);
	else
		work_handlers[rpl->opcode](adap, pgl->va);
	return 0;
}

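/*
 * Quiesce a device on CXGB4_STATE_DETACH: mark it CHCR_DETACH so no new
 * requests are accepted, wait for in-flight requests to drain via
 * detach_work_fn(), then move the context back to the inactive list.
 */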
static void chcr_detach_device(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev = &u_ctx->dev;

	if (dev->state == CHCR_DETACH) {
		pr_debug("Detach event received for an already detached device\n");
		return;
	}
	dev->state = CHCR_DETACH;
	if (atomic_read(&dev->inflight) != 0) {
		schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
		wait_for_completion(&dev->detach_comp);
	}

	/* Move u_ctx to the inact_dev list */
	chcr_dev_move(u_ctx);
}

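/*
 * ULD state-change callback: activate the device and start the crypto
 * service when the adapter comes up, quiesce it (and stop the service if
 * no devices remain) on detach.
 */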
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct uld_ctx *u_ctx = handle;
	int ret = 0;

	switch (state) {
	case CXGB4_STATE_UP:
		if (u_ctx->dev.state != CHCR_INIT) {
			/* Already initialised */
			return 0;
		}
		chcr_dev_add(u_ctx);
		ret = start_crypto();
		break;

	case CXGB4_STATE_DETACH:
		chcr_detach_device(u_ctx);
		if (!atomic_read(&drv_data.dev_count))
			stop_crypto();
		break;

	case CXGB4_STATE_START_RECOVERY:
	case CXGB4_STATE_DOWN:
	default:
		break;
	}
	return ret;
}

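/*
 * Module init: set up the driver-wide device lists and register with the
 * cxgb4 LLD as the crypto ULD. start_crypto()/stop_crypto() (implemented
 * alongside the algorithm code in chcr_algo.c) register and unregister
 * the offloaded algorithms with the kernel crypto API.
 */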
static int __init chcr_crypto_init(void)
{
	INIT_LIST_HEAD(&drv_data.act_dev);
	INIT_LIST_HEAD(&drv_data.inact_dev);
	atomic_set(&drv_data.dev_count, 0);
	mutex_init(&drv_data.drv_mutex);
	drv_data.last_dev = NULL;
	cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);

	return 0;
}

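/*
 * Module exit: stop the crypto service, unregister the ULD and free every
 * context left on either device list, clearing the per-adapter stats.
 */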
static void __exit chcr_crypto_exit(void)
{
	struct uld_ctx *u_ctx, *tmp;
	struct adapter *adap;

	stop_crypto();
	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
	/* Remove all devices from list */
	mutex_lock(&drv_data.drv_mutex);
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
		adap = padap(&u_ctx->dev);
		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
		adap = padap(&u_ctx->dev);
		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	mutex_unlock(&drv_data.drv_mutex);
}

module_init(chcr_crypto_init);
module_exit(chcr_crypto_exit);

MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");