linux/drivers/net/ethernet/netronome/nfp/crypto/tls.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/inet6_hashtables.h>
#include <net/tls.h>

#include "../ccm.h"
#include "../nfp_net.h"
#include "crypto.h"
#include "fw.h"

#define NFP_NET_TLS_CCM_MBOX_OPS_MASK		\
	(BIT(NFP_CCM_TYPE_CRYPTO_RESET) |	\
	 BIT(NFP_CCM_TYPE_CRYPTO_ADD) |		\
	 BIT(NFP_CCM_TYPE_CRYPTO_DEL) |		\
	 BIT(NFP_CCM_TYPE_CRYPTO_UPDATE))

#define NFP_NET_TLS_OPCODE_MASK_RX			\
	BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC)

#define NFP_NET_TLS_OPCODE_MASK_TX			\
	BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC)

#define NFP_NET_TLS_OPCODE_MASK						\
	(NFP_NET_TLS_OPCODE_MASK_RX | NFP_NET_TLS_OPCODE_MASK_TX)

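/* Each crypto opcode is a single enable bit in a bitmap starting at
 * crypto_enable_off; find the 32-bit word which holds the opcode's bit
 * and update only that bit.
 */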
static void nfp_net_crypto_set_op(struct nfp_net *nn, u8 opcode, bool on)
{
	u32 off, val;

	off = nn->tlv_caps.crypto_enable_off + round_down(opcode / 8, 4);

	val = nn_readl(nn, off);
	if (on)
		val |= BIT(opcode & 31);
	else
		val &= ~BIT(opcode & 31);
	nn_writel(nn, off, val);
}

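/* Adjust the connection count for one direction and return true if the
 * 0 <-> 1 transition means the opcode enable bit changed and a
 * NFP_NET_CFG_UPDATE_CRYPTO reconfig is required.  Caller must hold the
 * ctrl BAR lock.
 */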
static bool
__nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
			       enum tls_offload_ctx_dir direction)
{
	u8 opcode;
	int cnt;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
		nn->ktls_tx_conn_cnt += add;
		cnt = nn->ktls_tx_conn_cnt;
		nn->dp.ktls_tx = !!nn->ktls_tx_conn_cnt;
	} else {
		opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
		nn->ktls_rx_conn_cnt += add;
		cnt = nn->ktls_rx_conn_cnt;
	}

	/* Care only about 0 -> 1 and 1 -> 0 transitions */
	if (cnt > 1)
		return false;

	nfp_net_crypto_set_op(nn, opcode, cnt);
	return true;
}

static int
nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
			     enum tls_offload_ctx_dir direction)
{
	int ret = 0;

	/* Use the BAR lock to protect the connection counts */
	nn_ctrl_bar_lock(nn);
	if (__nfp_net_tls_conn_cnt_changed(nn, add, direction)) {
		ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
		/* Undo the cnt adjustment if failed */
		if (ret)
			__nfp_net_tls_conn_cnt_changed(nn, -add, direction);
	}
	nn_ctrl_bar_unlock(nn);

	return ret;
}

static int
nfp_net_tls_conn_add(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
{
	return nfp_net_tls_conn_cnt_changed(nn, 1, direction);
}

static int
nfp_net_tls_conn_remove(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
{
	return nfp_net_tls_conn_cnt_changed(nn, -1, direction);
}

static struct sk_buff *
nfp_net_tls_alloc_simple(struct nfp_net *nn, size_t req_sz, gfp_t flags)
{
	return nfp_ccm_mbox_msg_alloc(nn, req_sz,
				      sizeof(struct nfp_crypto_reply_simple),
				      flags);
}

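/* Exchange a request which is answered with a struct
 * nfp_crypto_reply_simple and turn the FW's big-endian status into a
 * negative errno.  DEL requests are passed as "critical", presumably so
 * the FW always learns about removed connections, even in error
 * situations.
 */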
static int
nfp_net_tls_communicate_simple(struct nfp_net *nn, struct sk_buff *skb,
			       const char *name, enum nfp_ccm_type type)
{
	struct nfp_crypto_reply_simple *reply;
	int err;

	err = __nfp_ccm_mbox_communicate(nn, skb, type,
					 sizeof(*reply), sizeof(*reply),
					 type == NFP_CCM_TYPE_CRYPTO_DEL);
	if (err) {
		nn_dp_warn(&nn->dp, "failed to %s TLS: %d\n", name, err);
		return err;
	}

	reply = (void *)skb->data;
	err = -be32_to_cpu(reply->error);
	if (err)
		nn_dp_warn(&nn->dp, "failed to %s TLS, fw replied: %d\n",
			   name, err);
	dev_consume_skb_any(skb);

	return err;
}

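/* Ask the FW to forget a connection handle.  This is best effort - if
 * the mailbox message can't be allocated the FW entry stays behind.
 */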
static void nfp_net_tls_del_fw(struct nfp_net *nn, __be32 *fw_handle)
{
	struct nfp_crypto_req_del *req;
	struct sk_buff *skb;

	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
	if (!skb)
		return;

	req = (void *)skb->data;
	req->ep_id = 0;
	memcpy(req->handle, fw_handle, sizeof(req->handle));

	nfp_net_tls_communicate_simple(nn, skb, "delete",
				       NFP_CCM_TYPE_CRYPTO_DEL);
}

static void
nfp_net_tls_set_ipver_vlan(struct nfp_crypto_req_add_front *front, u8 ipver)
{
	front->ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, ipver) |
					FIELD_PREP(NFP_NET_TLS_VLAN,
						   NFP_NET_TLS_VLAN_UNUSED));
}

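/* TX entries are not matched by address, so the L3 address bytes are
 * reused to carry a unique connection id; the remainder is zeroed.
 */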
static void
nfp_net_tls_assign_conn_id(struct nfp_net *nn,
			   struct nfp_crypto_req_add_front *front)
{
	u32 len;
	u64 id;

	id = atomic64_inc_return(&nn->ktls_conn_id_gen);
	len = front->key_len - NFP_NET_TLS_NON_ADDR_KEY_LEN;

	memcpy(front->l3_addrs, &id, sizeof(id));
	memset(front->l3_addrs + sizeof(id), 0, len - sizeof(id));
}

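/* RX entries are matched against received packets, hence the peer's
 * address is the source and the local address the destination.
 * TX entries carry a connection id instead (see above).
 */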
static struct nfp_crypto_req_add_back *
nfp_net_tls_set_ipv4(struct nfp_net *nn, struct nfp_crypto_req_add_v4 *req,
		     struct sock *sk, int direction)
{
	struct inet_sock *inet = inet_sk(sk);

	req->front.key_len += sizeof(__be32) * 2;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		nfp_net_tls_assign_conn_id(nn, &req->front);
	} else {
		req->src_ip = inet->inet_daddr;
		req->dst_ip = inet->inet_saddr;
	}

	return &req->back;
}

static struct nfp_crypto_req_add_back *
nfp_net_tls_set_ipv6(struct nfp_net *nn, struct nfp_crypto_req_add_v6 *req,
		     struct sock *sk, int direction)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6_pinfo *np = inet6_sk(sk);

	req->front.key_len += sizeof(struct in6_addr) * 2;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		nfp_net_tls_assign_conn_id(nn, &req->front);
	} else {
		memcpy(req->src_ip, &sk->sk_v6_daddr, sizeof(req->src_ip));
		memcpy(req->dst_ip, &np->saddr, sizeof(req->dst_ip));
	}

#endif
	return &req->back;
}

static void
nfp_net_tls_set_l4(struct nfp_crypto_req_add_front *front,
		   struct nfp_crypto_req_add_back *back, struct sock *sk,
		   int direction)
{
	struct inet_sock *inet = inet_sk(sk);

	front->l4_proto = IPPROTO_TCP;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		back->src_port = 0;
		back->dst_port = 0;
	} else {
		back->src_port = inet->inet_dport;
		back->dst_port = inet->inet_sport;
	}
}

static u8 nfp_tls_1_2_dir_to_opcode(enum tls_offload_ctx_dir direction)
{
	switch (direction) {
	case TLS_OFFLOAD_CTX_DIR_TX:
		return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
	case TLS_OFFLOAD_CTX_DIR_RX:
		return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

static bool
nfp_net_cipher_supported(struct nfp_net *nn, u16 cipher_type,
			 enum tls_offload_ctx_dir direction)
{
	u8 bit;

	switch (cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		if (direction == TLS_OFFLOAD_CTX_DIR_TX)
			bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
		else
			bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
		break;
	default:
		return false;
	}

	return nn->tlv_caps.crypto_ops & BIT(bit);
}

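/* Install a TLS connection into the NIC: bump the connection count
 * (which enables the opcode on the 0 -> 1 transition), build an ADD
 * request, exchange it over the CCM mailbox and stash the FW handle in
 * the socket's driver state.  Key material is wiped from the skb once
 * the FW has replied.
 */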
static int
nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
		enum tls_offload_ctx_dir direction,
		struct tls_crypto_info *crypto_info,
		u32 start_offload_tcp_sn)
{
	struct tls12_crypto_info_aes_gcm_128 *tls_ci;
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_crypto_req_add_front *front;
	struct nfp_net_tls_offload_ctx *ntls;
	struct nfp_crypto_req_add_back *back;
	struct nfp_crypto_reply_add *reply;
	struct sk_buff *skb;
	size_t req_sz;
	void *req;
	bool ipv6;
	int err;

	BUILD_BUG_ON(sizeof(struct nfp_net_tls_offload_ctx) >
		     TLS_DRIVER_STATE_SIZE_TX);
	BUILD_BUG_ON(offsetof(struct nfp_net_tls_offload_ctx, rx_end) >
		     TLS_DRIVER_STATE_SIZE_RX);

	if (!nfp_net_cipher_supported(nn, crypto_info->cipher_type, direction))
		return -EOPNOTSUPP;

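	/* IPv4-mapped addresses on dual-stack sockets are installed as
	 * IPv4 entries.
	 */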
	switch (sk->sk_family) {
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		if (sk->sk_ipv6only ||
		    ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
			req_sz = sizeof(struct nfp_crypto_req_add_v6);
			ipv6 = true;
			break;
		}
		fallthrough;
#endif
	case AF_INET:
		req_sz = sizeof(struct nfp_crypto_req_add_v4);
		ipv6 = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = nfp_net_tls_conn_add(nn, direction);
	if (err)
		return err;

	skb = nfp_ccm_mbox_msg_alloc(nn, req_sz, sizeof(*reply), GFP_KERNEL);
	if (!skb) {
		err = -ENOMEM;
		goto err_conn_remove;
	}

	front = (void *)skb->data;
	front->ep_id = 0;
	front->key_len = NFP_NET_TLS_NON_ADDR_KEY_LEN;
	front->opcode = nfp_tls_1_2_dir_to_opcode(direction);
	memset(front->resv, 0, sizeof(front->resv));

	nfp_net_tls_set_ipver_vlan(front, ipv6 ? 6 : 4);

	req = (void *)skb->data;
	if (ipv6)
		back = nfp_net_tls_set_ipv6(nn, req, sk, direction);
	else
		back = nfp_net_tls_set_ipv4(nn, req, sk, direction);

	nfp_net_tls_set_l4(front, back, sk, direction);

	back->counter = 0;
	back->tcp_seq = cpu_to_be32(start_offload_tcp_sn);

	tls_ci = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
	memcpy(back->key, tls_ci->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memset(&back->key[TLS_CIPHER_AES_GCM_128_KEY_SIZE / 4], 0,
	       sizeof(back->key) - TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memcpy(back->iv, tls_ci->iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
	memcpy(&back->salt, tls_ci->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(back->rec_no, tls_ci->rec_seq, sizeof(tls_ci->rec_seq));

	/* Get an extra ref on the skb so we can wipe the key after */
	skb_get(skb);

	err = nfp_ccm_mbox_communicate(nn, skb, NFP_CCM_TYPE_CRYPTO_ADD,
				       sizeof(*reply), sizeof(*reply));
	reply = (void *)skb->data;

	/* We depend on CCM MBOX code not reallocating skb we sent
	 * so we can clear the key material out of the memory.
	 */
	if (!WARN_ON_ONCE((u8 *)back < skb->head ||
			  (u8 *)back > skb_end_pointer(skb)) &&
	    !WARN_ON_ONCE((u8 *)&reply[1] > (u8 *)back))
		memzero_explicit(back, sizeof(*back));
	dev_consume_skb_any(skb); /* the extra ref from skb_get() above */

	if (err) {
		nn_dp_warn(&nn->dp, "failed to add TLS: %d (%d)\n",
			   err, direction == TLS_OFFLOAD_CTX_DIR_TX);
		/* communicate frees skb on error */
		goto err_conn_remove;
	}

	err = -be32_to_cpu(reply->error);
	if (err) {
		if (err == -ENOSPC) {
			if (!atomic_fetch_inc(&nn->ktls_no_space))
				nn_info(nn, "HW TLS table full\n");
		} else {
			nn_dp_warn(&nn->dp,
				   "failed to add TLS, FW replied: %d\n", err);
		}
		goto err_free_skb;
	}

	if (!reply->handle[0] && !reply->handle[1]) {
		nn_dp_warn(&nn->dp, "FW returned NULL handle\n");
		err = -EINVAL;
		goto err_fw_remove;
	}

	ntls = tls_driver_ctx(sk, direction);
	memcpy(ntls->fw_handle, reply->handle, sizeof(ntls->fw_handle));
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		ntls->next_seq = start_offload_tcp_sn;
	dev_consume_skb_any(skb);

	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return 0;

	if (!nn->tlv_caps.tls_resync_ss)
		tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);

	return 0;

err_fw_remove:
	nfp_net_tls_del_fw(nn, reply->handle);
err_free_skb:
	dev_consume_skb_any(skb);
err_conn_remove:
	nfp_net_tls_conn_remove(nn, direction);
	return err;
}

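/* Tear down a connection: drop the connection count (disabling the
 * opcode on the 1 -> 0 transition) and tell the FW to delete the handle.
 */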
static void
nfp_net_tls_del(struct net_device *netdev, struct tls_context *tls_ctx,
		enum tls_offload_ctx_dir direction)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_tls_offload_ctx *ntls;

	nfp_net_tls_conn_remove(nn, direction);

	ntls = __tls_driver_ctx(tls_ctx, direction);
	nfp_net_tls_del_fw(nn, ntls->fw_handle);
}

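/* TX resync runs in process context and waits for the FW before
 * updating next_seq; RX resync may be called from the receive path, so
 * the request is allocated atomically and posted without waiting for
 * the reply.
 */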
static int
nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
		   u8 *rcd_sn, enum tls_offload_ctx_dir direction)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_tls_offload_ctx *ntls;
	struct nfp_crypto_req_update *req;
	enum nfp_ccm_type type;
	struct sk_buff *skb;
	gfp_t flags;
	int err;

	flags = direction == TLS_OFFLOAD_CTX_DIR_TX ? GFP_KERNEL : GFP_ATOMIC;
	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), flags);
	if (!skb)
		return -ENOMEM;

	ntls = tls_driver_ctx(sk, direction);
	req = (void *)skb->data;
	req->ep_id = 0;
	req->opcode = nfp_tls_1_2_dir_to_opcode(direction);
	memset(req->resv, 0, sizeof(req->resv));
	memcpy(req->handle, ntls->fw_handle, sizeof(ntls->fw_handle));
	req->tcp_seq = cpu_to_be32(seq);
	memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no));

	type = NFP_CCM_TYPE_CRYPTO_UPDATE;
	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		err = nfp_net_tls_communicate_simple(nn, skb, "sync", type);
		if (err)
			return err;
		ntls->next_seq = seq;
	} else {
		if (nn->tlv_caps.tls_resync_ss)
			type = NFP_CCM_TYPE_CRYPTO_RESYNC;
		nfp_ccm_mbox_post(nn, skb, type,
				  sizeof(struct nfp_crypto_reply_simple));
		atomic_inc(&nn->ktls_rx_resync_sent);
	}

	return 0;
}

static const struct tlsdev_ops nfp_net_tls_ops = {
	.tls_dev_add = nfp_net_tls_add,
	.tls_dev_del = nfp_net_tls_del,
	.tls_dev_resync = nfp_net_tls_resync,
};

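/* Handle a FW-initiated RX resync request: parse the IP/TCP headers the
 * request points at, look up the established socket, check that it is
 * still offloaded to this device and hand the TCP sequence number to
 * the stack's resync machinery.
 */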
int nfp_net_tls_rx_resync_req(struct net_device *netdev,
			      struct nfp_net_tls_resync_req *req,
			      void *pkt, unsigned int pkt_len)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_tls_offload_ctx *ntls;
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	struct iphdr *iph;
	struct sock *sk;
	__be32 tcp_seq;
	int err;

	iph = pkt + req->l3_offset;
	ipv6h = pkt + req->l3_offset;
	th = pkt + req->l4_offset;

	if ((u8 *)&th[1] > (u8 *)pkt + pkt_len) {
		netdev_warn_once(netdev, "invalid TLS RX resync request (l3_off: %hhu l4_off: %hhu pkt_len: %u)\n",
				 req->l3_offset, req->l4_offset, pkt_len);
		err = -EINVAL;
		goto err_cnt_ign;
	}

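	/* The version nibble is at the same offset in IPv4 and IPv6
	 * headers, so reading it through ipv6h works for either.
	 */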
	switch (ipv6h->version) {
	case 4:
		sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
					     iph->saddr, th->source, iph->daddr,
					     th->dest, netdev->ifindex);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case 6:
		sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
						&ipv6h->saddr, th->source,
						&ipv6h->daddr, ntohs(th->dest),
						netdev->ifindex, 0);
		break;
#endif
	default:
		netdev_warn_once(netdev, "invalid TLS RX resync request (l3_off: %hhu l4_off: %hhu ipver: %u)\n",
				 req->l3_offset, req->l4_offset, iph->version);
		err = -EINVAL;
		goto err_cnt_ign;
	}

	err = 0;
	if (!sk)
		goto err_cnt_ign;
	if (!tls_is_sk_rx_device_offloaded(sk) ||
	    sk->sk_shutdown & RCV_SHUTDOWN)
		goto err_put_sock;

	ntls = tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_RX);
	/* some FW versions can't report the handle and report 0s */
	if (memchr_inv(&req->fw_handle, 0, sizeof(req->fw_handle)) &&
	    memcmp(&req->fw_handle, &ntls->fw_handle, sizeof(ntls->fw_handle)))
		goto err_put_sock;

	/* copy to ensure alignment */
	memcpy(&tcp_seq, &req->tcp_seq, sizeof(tcp_seq));
	tls_offload_rx_resync_request(sk, tcp_seq);
	atomic_inc(&nn->ktls_rx_resync_req);

	sock_gen_put(sk);
	return 0;

err_put_sock:
	sock_gen_put(sk);
err_cnt_ign:
	atomic_inc(&nn->ktls_rx_resync_ign);
	return err;
}

static int nfp_net_tls_reset(struct nfp_net *nn)
{
	struct nfp_crypto_req_reset *req;
	struct sk_buff *skb;

	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->ep_id = 0;

	return nfp_net_tls_communicate_simple(nn, skb, "reset",
					      NFP_CCM_TYPE_CRYPTO_RESET);
}

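/* Advertise TLS offload only if the FW supports the crypto opcodes, all
 * required mailbox commands, and a mailbox large enough for the biggest
 * request.  Start from a clean slate: reset the FW state and clear all
 * opcode enable bits.
 */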
int nfp_net_tls_init(struct nfp_net *nn)
{
	struct net_device *netdev = nn->dp.netdev;
	int err;

	if (!(nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK))
		return 0;

	if ((nn->tlv_caps.mbox_cmsg_types & NFP_NET_TLS_CCM_MBOX_OPS_MASK) !=
	    NFP_NET_TLS_CCM_MBOX_OPS_MASK)
		return 0;

	if (!nfp_ccm_mbox_fits(nn, sizeof(struct nfp_crypto_req_add_v6))) {
		nn_warn(nn, "disabling TLS offload - mbox too small: %d\n",
			nn->tlv_caps.mbox_len);
		return 0;
	}

	err = nfp_net_tls_reset(nn);
	if (err)
		return err;

	nn_ctrl_bar_lock(nn);
	nn_writel(nn, nn->tlv_caps.crypto_enable_off, 0);
	err = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
	nn_ctrl_bar_unlock(nn);
	if (err)
		return err;

	if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_RX) {
		netdev->hw_features |= NETIF_F_HW_TLS_RX;
		netdev->features |= NETIF_F_HW_TLS_RX;
	}
	if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_TX) {
		netdev->hw_features |= NETIF_F_HW_TLS_TX;
		netdev->features |= NETIF_F_HW_TLS_TX;
	}

	netdev->tlsdev_ops = &nfp_net_tls_ops;

	return 0;
}