linux/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/hash.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "chtls.h"
#include "chtls_cm.h"

#define DRV_NAME "chtls"

/*
 * chtls device management
 * maintains a list of the chtls devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);

static DEFINE_MUTEX(notify_mutex);
static RAW_NOTIFIER_HEAD(listen_notify_list);
static struct proto chtls_cpl_prot, chtls_cpl_protv6;
struct request_sock_ops chtls_rsk_ops, chtls_rsk_opsv6;
static uint send_page_order = (14 - PAGE_SHIFT < 0) ? 0 : 14 - PAGE_SHIFT;

static void register_listen_notifier(struct notifier_block *nb)
{
        mutex_lock(&notify_mutex);
        raw_notifier_chain_register(&listen_notify_list, nb);
        mutex_unlock(&notify_mutex);
}

static void unregister_listen_notifier(struct notifier_block *nb)
{
        mutex_lock(&notify_mutex);
        raw_notifier_chain_unregister(&listen_notify_list, nb);
        mutex_unlock(&notify_mutex);
}

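/*
 * Notifier callback: dispatch listen start/stop requests posted through
 * the listen_notify_list chain and free the request once it is handled.
 */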
static int listen_notify_handler(struct notifier_block *this,
                                 unsigned long event, void *data)
{
        struct chtls_listen *clisten;
        int ret = NOTIFY_DONE;

        clisten = (struct chtls_listen *)data;

        switch (event) {
        case CHTLS_LISTEN_START:
                ret = chtls_listen_start(clisten->cdev, clisten->sk);
                kfree(clisten);
                break;
        case CHTLS_LISTEN_STOP:
                chtls_listen_stop(clisten->cdev, clisten->sk);
                kfree(clisten);
                break;
        }
        return ret;
}

static struct notifier_block listen_notifier = {
        .notifier_call = listen_notify_handler
};

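/*
 * Backlog receive for listening sockets: CPL messages queued by chtls have
 * no separate transport header, so hand those to the handler saved in the
 * skb control block; ordinary TCP segments go through tcp_v4_do_rcv().
 */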
static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
        if (likely(skb_transport_header(skb) != skb_network_header(skb)))
                return tcp_v4_do_rcv(sk, skb);
        BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
        return 0;
}

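/*
 * Ask the hardware to take over a listening socket.  The actual work is
 * done by chtls_listen_start() via the notifier chain; only TCP sockets
 * that are not bound to a loopback address are offloaded.
 */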
static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
{
        struct chtls_listen *clisten;

        if (sk->sk_protocol != IPPROTO_TCP)
                return -EPROTONOSUPPORT;

        if (sk->sk_family == PF_INET &&
            LOOPBACK(inet_sk(sk)->inet_rcv_saddr))
                return -EADDRNOTAVAIL;

        sk->sk_backlog_rcv = listen_backlog_rcv;
        clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
        if (!clisten)
                return -ENOMEM;
        clisten->cdev = cdev;
        clisten->sk = sk;
        mutex_lock(&notify_mutex);
        raw_notifier_call_chain(&listen_notify_list,
                                CHTLS_LISTEN_START, clisten);
        mutex_unlock(&notify_mutex);
        return 0;
}

static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
{
        struct chtls_listen *clisten;

        if (sk->sk_protocol != IPPROTO_TCP)
                return;

        clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
        if (!clisten)
                return;
        clisten->cdev = cdev;
        clisten->sk = sk;
        mutex_lock(&notify_mutex);
        raw_notifier_call_chain(&listen_notify_list,
                                CHTLS_LISTEN_STOP, clisten);
        mutex_unlock(&notify_mutex);
}

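/*
 * Report whether any port of this adapter advertises inline TLS record
 * offload (NETIF_F_HW_TLS_RECORD).
 */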
static int chtls_inline_feature(struct tls_toe_device *dev)
{
        struct net_device *netdev;
        struct chtls_dev *cdev;
        int i;

        cdev = to_chtls_dev(dev);

        for (i = 0; i < cdev->lldi->nports; i++) {
                netdev = cdev->ports[i];
                if (netdev->features & NETIF_F_HW_TLS_RECORD)
                        return 1;
        }
        return 0;
}

static int chtls_create_hash(struct tls_toe_device *dev, struct sock *sk)
{
        struct chtls_dev *cdev = to_chtls_dev(dev);

        if (sk->sk_state == TCP_LISTEN)
                return chtls_start_listen(cdev, sk);
        return 0;
}

static void chtls_destroy_hash(struct tls_toe_device *dev, struct sock *sk)
{
        struct chtls_dev *cdev = to_chtls_dev(dev);

        if (sk->sk_state == TCP_LISTEN)
                chtls_stop_listen(cdev, sk);
}

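/*
 * Unregister the TLS TOE device and release everything that
 * chtls_uld_add() set up for this adapter.
 */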
static void chtls_free_uld(struct chtls_dev *cdev)
{
        int i;

        tls_toe_unregister_device(&cdev->tlsdev);
        kvfree(cdev->kmap.addr);
        idr_destroy(&cdev->hwtid_idr);
        for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
                kfree_skb(cdev->rspq_skb_cache[i]);
        kfree(cdev->lldi);
        kfree_skb(cdev->askb);
        kfree(cdev);
}

static inline void chtls_dev_release(struct kref *kref)
{
        struct tls_toe_device *dev;
        struct chtls_dev *cdev;
        struct adapter *adap;

        dev = container_of(kref, struct tls_toe_device, kref);
        cdev = to_chtls_dev(dev);

        /* Reset tls rx/tx stats */
        adap = pci_get_drvdata(cdev->pdev);
        atomic_set(&adap->chcr_stats.tls_pdu_tx, 0);
        atomic_set(&adap->chcr_stats.tls_pdu_rx, 0);

        chtls_free_uld(cdev);
}

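/*
 * Register this adapter with the TLS TOE core: the device is named after
 * the driver plus the adapter's first port, and the TOE callbacks are
 * wired up before the device is marked CHTLS_CDEV_STATE_UP.
 */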
static void chtls_register_dev(struct chtls_dev *cdev)
{
        struct tls_toe_device *tlsdev = &cdev->tlsdev;

        strlcpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX);
        strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
                TLS_TOE_DEVICE_NAME_MAX);
        tlsdev->feature = chtls_inline_feature;
        tlsdev->hash = chtls_create_hash;
        tlsdev->unhash = chtls_destroy_hash;
        tlsdev->release = chtls_dev_release;
        kref_init(&tlsdev->kref);
        tls_toe_register_device(tlsdev);
        cdev->cdev_state = CHTLS_CDEV_STATE_UP;
}

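/*
 * Work handler that drains the per-device deferred-skb queue, releasing
 * the queue lock around each deferred handler invocation.
 */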
static void process_deferq(struct work_struct *task_param)
{
        struct chtls_dev *cdev = container_of(task_param,
                                struct chtls_dev, deferq_task);
        struct sk_buff *skb;

        spin_lock_bh(&cdev->deferq.lock);
        while ((skb = __skb_dequeue(&cdev->deferq)) != NULL) {
                spin_unlock_bh(&cdev->deferq.lock);
                DEFERRED_SKB_CB(skb)->handler(cdev, skb);
                spin_lock_bh(&cdev->deferq.lock);
        }
        spin_unlock_bh(&cdev->deferq.lock);
}

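/*
 * Preallocate a zeroed skb the size of a TCP header (cdev->askb); it is
 * kept around for later use elsewhere in the driver.
 */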
static int chtls_get_skb(struct chtls_dev *cdev)
{
        cdev->askb = alloc_skb(sizeof(struct tcphdr), GFP_KERNEL);
        if (!cdev->askb)
                return -ENOMEM;

        skb_put(cdev->askb, sizeof(struct tcphdr));
        skb_reset_transport_header(cdev->askb);
        memset(cdev->askb->data, 0, cdev->askb->len);
        return 0;
}

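/*
 * ULD "add" callback, invoked by cxgb4 for each adapter: allocate the
 * per-adapter chtls_dev, snapshot the lower-level driver info, set up the
 * response-skb cache, deferred-work queue and key map, and add the device
 * to the global list.
 */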
static void *chtls_uld_add(const struct cxgb4_lld_info *info)
{
        struct cxgb4_lld_info *lldi;
        struct chtls_dev *cdev;
        int i, j;

        cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
        if (!cdev)
                goto out;

        lldi = kzalloc(sizeof(*lldi), GFP_KERNEL);
        if (!lldi)
                goto out_lldi;

        if (chtls_get_skb(cdev))
                goto out_skb;

        *lldi = *info;
        cdev->lldi = lldi;
        cdev->pdev = lldi->pdev;
        cdev->tids = lldi->tids;
        cdev->ports = lldi->ports;
        cdev->mtus = lldi->mtus;
        cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
                        << FW_VIID_PFN_S;

        for (i = 0; i < (1 << RSPQ_HASH_BITS); i++) {
                unsigned int size = 64 - sizeof(struct rsp_ctrl) - 8;

                cdev->rspq_skb_cache[i] = __alloc_skb(size,
                                                      gfp_any(), 0,
                                                      lldi->nodeid);
                if (unlikely(!cdev->rspq_skb_cache[i]))
                        goto out_rspq_skb;
        }

        idr_init(&cdev->hwtid_idr);
        INIT_WORK(&cdev->deferq_task, process_deferq);
        spin_lock_init(&cdev->listen_lock);
        spin_lock_init(&cdev->idr_lock);
        cdev->send_page_order = min_t(uint, get_order(32768),
                                      send_page_order);
        cdev->max_host_sndbuf = 48 * 1024;

        if (lldi->vr->key.size)
                if (chtls_init_kmap(cdev, lldi))
                        goto out_rspq_skb;

        mutex_lock(&cdev_mutex);
        list_add_tail(&cdev->list, &cdev_list);
        mutex_unlock(&cdev_mutex);

        return cdev;
out_rspq_skb:
        for (j = 0; j < i; j++)
                kfree_skb(cdev->rspq_skb_cache[j]);
        kfree_skb(cdev->askb);
out_skb:
        kfree(lldi);
out_lldi:
        kfree(cdev);
out:
        return NULL;
}

static void chtls_free_all_uld(void)
{
        struct chtls_dev *cdev, *tmp;

        mutex_lock(&cdev_mutex);
        list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
                if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) {
                        list_del(&cdev->list);
                        kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
                }
        }
        mutex_unlock(&cdev_mutex);
}

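/*
 * ULD state-change callback: register the TLS TOE device when the adapter
 * comes up and drop our reference when it is detached.
 */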
static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
{
        struct chtls_dev *cdev = handle;

        switch (new_state) {
        case CXGB4_STATE_UP:
                chtls_register_dev(cdev);
                break;
        case CXGB4_STATE_DOWN:
                break;
        case CXGB4_STATE_START_RECOVERY:
                break;
        case CXGB4_STATE_DETACH:
                mutex_lock(&cdev_mutex);
                list_del(&cdev->list);
                mutex_unlock(&cdev_mutex);
                kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
                break;
        default:
                break;
        }
        return 0;
}

static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
                                          const __be64 *rsp,
                                          u32 pktshift)
{
        struct sk_buff *skb;

        /* Allocate space for cpl_pass_accept_req, which will be synthesized
         * by the driver. Once the driver has synthesized cpl_pass_accept_req,
         * the skb goes through the regular cpl_pass_accept_req processing
         * in TOM.
         */
        skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
                        - pktshift, GFP_ATOMIC);
        if (unlikely(!skb))
                return NULL;
        __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
                   - pktshift);
        /* For now we only copy cpl_rx_pkt into the skb */
        skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_rx_pkt));
        skb_copy_to_linear_data_offset(skb, sizeof(struct cpl_pass_accept_req),
                                       gl->va + pktshift,
                                       gl->tot_len - pktshift);

        return skb;
}

static int chtls_recv_packet(struct chtls_dev *cdev,
                             const struct pkt_gl *gl, const __be64 *rsp)
{
        unsigned int opcode = *(u8 *)rsp;
        struct sk_buff *skb;
        int ret;

        skb = copy_gl_to_skb_pkt(gl, rsp, cdev->lldi->sge_pktshift);
        if (!skb)
                return -ENOMEM;

        ret = chtls_handlers[opcode](cdev, skb);
        if (ret & CPL_RET_BUF_DONE)
                kfree_skb(skb);

        return 0;
}

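/*
 * Handle a CPL response that arrived without packet data.  Reuse the skb
 * cached for this response-queue hash bin if nothing else holds it,
 * otherwise fall back to a fresh atomic allocation; copy the response in
 * and dispatch it by CPL opcode.
 */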
static int chtls_recv_rsp(struct chtls_dev *cdev, const __be64 *rsp)
{
        unsigned long rspq_bin;
        unsigned int opcode;
        struct sk_buff *skb;
        unsigned int len;
        int ret;

        len = 64 - sizeof(struct rsp_ctrl) - 8;
        opcode = *(u8 *)rsp;

        rspq_bin = hash_ptr((void *)rsp, RSPQ_HASH_BITS);
        skb = cdev->rspq_skb_cache[rspq_bin];
        if (skb && !skb_is_nonlinear(skb) &&
            !skb_shared(skb) && !skb_cloned(skb)) {
                refcount_inc(&skb->users);
                if (refcount_read(&skb->users) == 2) {
                        __skb_trim(skb, 0);
                        if (skb_tailroom(skb) >= len)
                                goto copy_out;
                }
                refcount_dec(&skb->users);
        }
        skb = alloc_skb(len, GFP_ATOMIC);
        if (unlikely(!skb))
                return -ENOMEM;

copy_out:
        __skb_put(skb, len);
        skb_copy_to_linear_data(skb, rsp, len);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        ret = chtls_handlers[opcode](cdev, skb);

        if (ret & CPL_RET_BUF_DONE)
                kfree_skb(skb);
        return 0;
}

static void chtls_recv(struct chtls_dev *cdev,
                       struct sk_buff **skbs, const __be64 *rsp)
{
        struct sk_buff *skb = *skbs;
        unsigned int opcode;
        int ret;

        opcode = *(u8 *)rsp;

        __skb_push(skb, sizeof(struct rss_header));
        skb_copy_to_linear_data(skb, rsp, sizeof(struct rss_header));

        ret = chtls_handlers[opcode](cdev, skb);
        if (ret & CPL_RET_BUF_DONE)
                kfree_skb(skb);
}

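/*
 * Main ULD receive entry point.  CPL_RX_PKT messages (typically an
 * incoming SYN for an offloaded listener) are converted into synthetic
 * pass-accept requests; responses without a gather list are handled from
 * the response descriptor alone, and everything else is turned into an
 * skb and dispatched by opcode.
 */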
static int chtls_uld_rx_handler(void *handle, const __be64 *rsp,
                                const struct pkt_gl *gl)
{
        struct chtls_dev *cdev = handle;
        unsigned int opcode;
        struct sk_buff *skb;

        opcode = *(u8 *)rsp;

        if (unlikely(opcode == CPL_RX_PKT)) {
                if (chtls_recv_packet(cdev, gl, rsp) < 0)
                        goto nomem;
                return 0;
        }

        if (!gl)
                return chtls_recv_rsp(cdev, rsp);

#define RX_PULL_LEN 128
        skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
        if (unlikely(!skb))
                goto nomem;
        chtls_recv(cdev, &skb, rsp);
        return 0;

nomem:
        return -ENOMEM;
}

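/* chtls only offloads TLS 1.2, so that is the version reported back. */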
static int do_chtls_getsockopt(struct sock *sk, char __user *optval,
                               int __user *optlen)
{
        struct tls_crypto_info crypto_info = { 0 };

        crypto_info.version = TLS_1_2_VERSION;
        if (copy_to_user(optval, &crypto_info, sizeof(struct tls_crypto_info)))
                return -EFAULT;
        return 0;
}

static int chtls_getsockopt(struct sock *sk, int level, int optname,
                            char __user *optval, int __user *optlen)
{
        struct tls_context *ctx = tls_get_ctx(sk);

        if (level != SOL_TLS)
                return ctx->sk_proto->getsockopt(sk, level,
                                                 optname, optval, optlen);

        return do_chtls_getsockopt(sk, optval, optlen);
}

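/*
 * Handle a SOL_TLS setsockopt: accept only TLS 1.2 with AES-GCM-128 or
 * AES-GCM-256, copy the crypto material from userspace into the
 * per-connection state and program the key into hardware via
 * chtls_setkey().
 */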
static int do_chtls_setsockopt(struct sock *sk, int optname,
                               sockptr_t optval, unsigned int optlen)
{
        struct tls_crypto_info *crypto_info, tmp_crypto_info;
        struct chtls_sock *csk;
        int keylen;
        int cipher_type;
        int rc = 0;

        csk = rcu_dereference_sk_user_data(sk);

        if (sockptr_is_null(optval) || optlen < sizeof(*crypto_info)) {
                rc = -EINVAL;
                goto out;
        }

        rc = copy_from_sockptr(&tmp_crypto_info, optval, sizeof(*crypto_info));
        if (rc) {
                rc = -EFAULT;
                goto out;
        }

        /* check version */
        if (tmp_crypto_info.version != TLS_1_2_VERSION) {
                rc = -ENOTSUPP;
                goto out;
        }

        crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info;

        /* AES-GCM supports 128- and 256-bit keys, so copy the key material
         * from userspace according to the requested cipher type.
         */
        switch (tmp_crypto_info.cipher_type) {
        case TLS_CIPHER_AES_GCM_128: {
                /* Version and cipher type were obtained by the copy above */
                crypto_info[0] = tmp_crypto_info;
                /* Copy the remainder of the AES-GCM-128 crypto_info */
                rc = copy_from_sockptr_offset((char *)crypto_info +
                                sizeof(*crypto_info),
                                optval, sizeof(*crypto_info),
                                sizeof(struct tls12_crypto_info_aes_gcm_128)
                                - sizeof(*crypto_info));

                if (rc) {
                        rc = -EFAULT;
                        goto out;
                }

                keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
                cipher_type = TLS_CIPHER_AES_GCM_128;
                break;
        }
        case TLS_CIPHER_AES_GCM_256: {
                crypto_info[0] = tmp_crypto_info;
                rc = copy_from_sockptr_offset((char *)crypto_info +
                                sizeof(*crypto_info),
                                optval, sizeof(*crypto_info),
                                sizeof(struct tls12_crypto_info_aes_gcm_256)
                                - sizeof(*crypto_info));

                if (rc) {
                        rc = -EFAULT;
                        goto out;
                }

                keylen = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
                cipher_type = TLS_CIPHER_AES_GCM_256;
                break;
        }
        default:
                rc = -EINVAL;
                goto out;
        }
        rc = chtls_setkey(csk, keylen, optname, cipher_type);
out:
        return rc;
}

static int chtls_setsockopt(struct sock *sk, int level, int optname,
                            sockptr_t optval, unsigned int optlen)
{
        struct tls_context *ctx = tls_get_ctx(sk);

        if (level != SOL_TLS)
                return ctx->sk_proto->setsockopt(sk, level,
                                                 optname, optval, optlen);

        return do_chtls_setsockopt(sk, optname, optval, optlen);
}

static struct cxgb4_uld_info chtls_uld_info = {
        .name = DRV_NAME,
        .nrxq = MAX_ULD_QSETS,
        .ntxq = MAX_ULD_QSETS,
        .rxq_size = 1024,
        .add = chtls_uld_add,
        .state_change = chtls_uld_state_change,
        .rx_handler = chtls_uld_rx_handler,
};

void chtls_install_cpl_ops(struct sock *sk)
{
        if (sk->sk_family == AF_INET)
                sk->sk_prot = &chtls_cpl_prot;
        else
                sk->sk_prot = &chtls_cpl_protv6;
}

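/*
 * Build the chtls protocol ops: start from tcp_prot (and tcpv6_prot when
 * IPv6 is enabled) and override the entry points that must go through the
 * offload path.
 */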
static void __init chtls_init_ulp_ops(void)
{
        chtls_cpl_prot                  = tcp_prot;
        chtls_init_rsk_ops(&chtls_cpl_prot, &chtls_rsk_ops,
                           &tcp_prot, PF_INET);
        chtls_cpl_prot.close            = chtls_close;
        chtls_cpl_prot.disconnect       = chtls_disconnect;
        chtls_cpl_prot.destroy          = chtls_destroy_sock;
        chtls_cpl_prot.shutdown         = chtls_shutdown;
        chtls_cpl_prot.sendmsg          = chtls_sendmsg;
        chtls_cpl_prot.sendpage         = chtls_sendpage;
        chtls_cpl_prot.recvmsg          = chtls_recvmsg;
        chtls_cpl_prot.setsockopt       = chtls_setsockopt;
        chtls_cpl_prot.getsockopt       = chtls_getsockopt;
#if IS_ENABLED(CONFIG_IPV6)
        chtls_cpl_protv6                = chtls_cpl_prot;
        chtls_init_rsk_ops(&chtls_cpl_protv6, &chtls_rsk_opsv6,
                           &tcpv6_prot, PF_INET6);
#endif
}

static int __init chtls_register(void)
{
        chtls_init_ulp_ops();
        register_listen_notifier(&listen_notifier);
        cxgb4_register_uld(CXGB4_ULD_TLS, &chtls_uld_info);
        return 0;
}

static void __exit chtls_unregister(void)
{
        unregister_listen_notifier(&listen_notifier);
        chtls_free_all_uld();
        cxgb4_unregister_uld(CXGB4_ULD_TLS);
}

module_init(chtls_register);
module_exit(chtls_unregister);

MODULE_DESCRIPTION("Chelsio TLS Inline driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(CHTLS_DRV_VERSION);