linux/drivers/target/iscsi/cxgbit/cxgbit_main.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 */

#define DRV_NAME "cxgbit"
#define DRV_VERSION "1.0.0-ko"
#define pr_fmt(fmt) DRV_NAME ": " fmt

#include "cxgbit.h"

#ifdef CONFIG_CHELSIO_T4_DCB
#include <net/dcbevent.h>
#include "cxgb4_dcb.h"
#endif

LIST_HEAD(cdev_list_head);
/* cdev list lock */
DEFINE_MUTEX(cdev_list_lock);

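/*
 * kref release callback for a cxgbit_device: release the iSCSI page-pod
 * manager associated with the adapter and free the device structure.
 */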
void _cxgbit_free_cdev(struct kref *kref)
{
        struct cxgbit_device *cdev;

        cdev = container_of(kref, struct cxgbit_device, kref);

        cxgbi_ppm_release(cdev2ppm(cdev));
        kfree(cdev);
}

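/*
 * Work out the maximum data segment length (MDSL) advertised for this
 * adapter: the hardware iSCSI I/O length and the ULP2 packet limit, each
 * minus non-payload PDU overhead, further capped at 8KB and at what fits
 * in the receive skb fragment array.  As an illustrative example (numbers
 * assumed, not from this file): with iscsi_iolen = 16384 the first two
 * terms give 16072 and 15912, so the 8192 cap ends up being the limit.
 */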
static void cxgbit_set_mdsl(struct cxgbit_device *cdev)
{
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        u32 mdsl;

#define ULP2_MAX_PKT_LEN 16224
#define ISCSI_PDU_NONPAYLOAD_LEN 312
        mdsl = min_t(u32, lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN,
                     ULP2_MAX_PKT_LEN - ISCSI_PDU_NONPAYLOAD_LEN);
        mdsl = min_t(u32, mdsl, 8192);
        mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE);

        cdev->mdsl = mdsl;
}

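/*
 * ULD "add" callback, invoked by cxgb4 for each adapter it probes.  T4
 * parts are rejected; for later parts a cxgbit_device is allocated, MDSL
 * and DDP resources are set up, and the device is linked onto the global
 * cdev list.  ISO (CDEV_ISO_ENABLE) is only turned on for firmware at or
 * above 0x10d2b00, presumably the first version with the needed support.
 */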
static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi)
{
        struct cxgbit_device *cdev;

        if (is_t4(lldi->adapter_type))
                return ERR_PTR(-ENODEV);

        cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
        if (!cdev)
                return ERR_PTR(-ENOMEM);

        kref_init(&cdev->kref);
        spin_lock_init(&cdev->np_lock);

        cdev->lldi = *lldi;

        cxgbit_set_mdsl(cdev);

        if (cxgbit_ddp_init(cdev) < 0) {
                kfree(cdev);
                return ERR_PTR(-EINVAL);
        }

        if (!test_bit(CDEV_DDP_ENABLE, &cdev->flags))
                pr_info("cdev %s ddp init failed\n",
                        pci_name(lldi->pdev));

        if (lldi->fw_vers >= 0x10d2b00)
                set_bit(CDEV_ISO_ENABLE, &cdev->flags);

        spin_lock_init(&cdev->cskq.lock);
        INIT_LIST_HEAD(&cdev->cskq.list);

        mutex_lock(&cdev_list_lock);
        list_add_tail(&cdev->list, &cdev_list_head);
        mutex_unlock(&cdev_list_lock);

        pr_info("cdev %s added for iSCSI target transport\n",
                pci_name(lldi->pdev));

        return cdev;
}

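/*
 * Queue a zero-length skb on every connection owned by this device and
 * wake the connection's rx thread; called on recovery and detach so that
 * each connection notices the device state change.
 */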
static void cxgbit_close_conn(struct cxgbit_device *cdev)
{
        struct cxgbit_sock *csk;
        struct sk_buff *skb;
        bool wakeup_thread = false;

        spin_lock_bh(&cdev->cskq.lock);
        list_for_each_entry(csk, &cdev->cskq.list, list) {
                skb = alloc_skb(0, GFP_ATOMIC);
                if (!skb)
                        continue;

                spin_lock_bh(&csk->rxq.lock);
                __skb_queue_tail(&csk->rxq, skb);
                if (skb_queue_len(&csk->rxq) == 1)
                        wakeup_thread = true;
                spin_unlock_bh(&csk->rxq.lock);

                if (wakeup_thread) {
                        wake_up(&csk->waitq);
                        wakeup_thread = false;
                }
        }
        spin_unlock_bh(&cdev->cskq.lock);
}

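/*
 * Detach a device from the driver: if no connections remain it is removed
 * from the cdev list and its reference dropped immediately, otherwise the
 * remaining connections are nudged to close first.
 */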
static void cxgbit_detach_cdev(struct cxgbit_device *cdev)
{
        bool free_cdev = false;

        spin_lock_bh(&cdev->cskq.lock);
        if (list_empty(&cdev->cskq.list))
                free_cdev = true;
        spin_unlock_bh(&cdev->cskq.lock);

        if (free_cdev) {
                mutex_lock(&cdev_list_lock);
                list_del(&cdev->list);
                mutex_unlock(&cdev_list_lock);

                cxgbit_put_cdev(cdev);
        } else {
                cxgbit_close_conn(cdev);
        }
}

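/* ULD state-change callback: track adapter UP/RECOVERY/DOWN/DETACH events. */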
static int cxgbit_uld_state_change(void *handle, enum cxgb4_state state)
{
        struct cxgbit_device *cdev = handle;

        switch (state) {
        case CXGB4_STATE_UP:
                set_bit(CDEV_STATE_UP, &cdev->flags);
                pr_info("cdev %s state UP.\n", pci_name(cdev->lldi.pdev));
                break;
        case CXGB4_STATE_START_RECOVERY:
                clear_bit(CDEV_STATE_UP, &cdev->flags);
                cxgbit_close_conn(cdev);
                pr_info("cdev %s state RECOVERY.\n", pci_name(cdev->lldi.pdev));
                break;
        case CXGB4_STATE_DOWN:
                pr_info("cdev %s state DOWN.\n", pci_name(cdev->lldi.pdev));
                break;
        case CXGB4_STATE_DETACH:
                clear_bit(CDEV_STATE_UP, &cdev->flags);
                pr_info("cdev %s state DETACH.\n", pci_name(cdev->lldi.pdev));
                cxgbit_detach_cdev(cdev);
                break;
        default:
                pr_info("cdev %s unknown state %d.\n",
                        pci_name(cdev->lldi.pdev), state);
                break;
        }
        return 0;
}

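/*
 * Decode the DDP validity word reported by the hardware: flag header and
 * data digest errors on the PDU, log padding errors, and mark the payload
 * as directly placed (DDP'd) when the DDP bit is set and no inline data
 * was received.
 */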
static void
cxgbit_process_ddpvld(struct cxgbit_sock *csk, struct cxgbit_lro_pdu_cb *pdu_cb,
                      u32 ddpvld)
{
        if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) {
                pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", csk->tid, ddpvld);
                pdu_cb->flags |= PDUCBF_RX_HCRC_ERR;
        }

        if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) {
                pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", csk->tid, ddpvld);
                pdu_cb->flags |= PDUCBF_RX_DCRC_ERR;
        }

        if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT))
                pr_info("tid 0x%x, status 0x%x, pad bad.\n", csk->tid, ddpvld);

        if ((ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) &&
            (!(pdu_cb->flags & PDUCBF_RX_DATA))) {
                pdu_cb->flags |= PDUCBF_RX_DATA_DDPD;
        }
}

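/*
 * Fold a payload-less CPL_RX_ISCSI_DDP completion into the current LRO skb:
 * record digest, length and status for the PDU and mark it complete if its
 * header has already been merged.
 */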
static void
cxgbit_lro_add_packet_rsp(struct sk_buff *skb, u8 op, const __be64 *rsp)
{
        struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
        struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
                                                lro_cb->pdu_idx);
        struct cpl_rx_iscsi_ddp *cpl = (struct cpl_rx_iscsi_ddp *)(rsp + 1);

        cxgbit_process_ddpvld(lro_cb->csk, pdu_cb, be32_to_cpu(cpl->ddpvld));

        pdu_cb->flags |= PDUCBF_RX_STATUS;
        pdu_cb->ddigest = ntohl(cpl->ulp_crc);
        pdu_cb->pdulen = ntohs(cpl->len);

        if (pdu_cb->flags & PDUCBF_RX_HDR)
                pdu_cb->complete = true;

        lro_cb->pdu_totallen += pdu_cb->pdulen;
        lro_cb->complete = true;
        lro_cb->pdu_idx++;
}

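/*
 * Append the pages of a gather list to the skb's fragment array, skipping
 * @offset bytes of the first fragment (the CPL header already consumed by
 * the caller).  A reference is taken on the last page since the caller
 * retains ownership of it.
 */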
static void
cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl,
                  unsigned int offset)
{
        u8 skb_frag_idx = skb_shinfo(skb)->nr_frags;
        u8 i;

        /* usually there's just one frag */
        __skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page,
                             gl->frags[0].offset + offset,
                             gl->frags[0].size - offset);
        for (i = 1; i < gl->nfrags; i++)
                __skb_fill_page_desc(skb, skb_frag_idx + i,
                                     gl->frags[i].page,
                                     gl->frags[i].offset,
                                     gl->frags[i].size);

        skb_shinfo(skb)->nr_frags += gl->nfrags;

        /* get a reference to the last page, we don't own it */
        get_page(gl->frags[gl->nfrags - 1].page);
}

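/*
 * Merge one gather-list packet (CPL_ISCSI_HDR, CPL_ISCSI_DATA or
 * CPL_RX_ISCSI_CMP) into the LRO skb: stash header/data location and
 * status in the per-PDU control block, then append the payload fragments
 * and update the skb length accounting.
 */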
static void
cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl)
{
        struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
        struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
                                                lro_cb->pdu_idx);
        u32 len, offset;

        if (op == CPL_ISCSI_HDR) {
                struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)gl->va;

                offset = sizeof(struct cpl_iscsi_hdr);
                pdu_cb->flags |= PDUCBF_RX_HDR;
                pdu_cb->seq = ntohl(cpl->seq);
                len = ntohs(cpl->len);
                pdu_cb->hdr = gl->va + offset;
                pdu_cb->hlen = len;
                pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;

                if (unlikely(gl->nfrags > 1))
                        cxgbit_skcb_flags(skb) = 0;

                lro_cb->complete = false;
        } else if (op == CPL_ISCSI_DATA) {
                struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va;

                offset = sizeof(struct cpl_iscsi_data);
                pdu_cb->flags |= PDUCBF_RX_DATA;
                len = ntohs(cpl->len);
                pdu_cb->dlen = len;
                pdu_cb->doffset = lro_cb->offset;
                pdu_cb->nr_dfrags = gl->nfrags;
                pdu_cb->dfrag_idx = skb_shinfo(skb)->nr_frags;
                lro_cb->complete = false;
        } else {
                struct cpl_rx_iscsi_cmp *cpl;

                cpl = (struct cpl_rx_iscsi_cmp *)gl->va;
                offset = sizeof(struct cpl_rx_iscsi_cmp);
                pdu_cb->flags |= (PDUCBF_RX_HDR | PDUCBF_RX_STATUS);
                len = be16_to_cpu(cpl->len);
                pdu_cb->hdr = gl->va + offset;
                pdu_cb->hlen = len;
                pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;
                pdu_cb->ddigest = be32_to_cpu(cpl->ulp_crc);
                pdu_cb->pdulen = ntohs(cpl->len);

                if (unlikely(gl->nfrags > 1))
                        cxgbit_skcb_flags(skb) = 0;

                cxgbit_process_ddpvld(lro_cb->csk, pdu_cb,
                                      be32_to_cpu(cpl->ddpvld));

                if (pdu_cb->flags & PDUCBF_RX_DATA_DDPD) {
                        pdu_cb->flags |= PDUCBF_RX_DDP_CMP;
                        pdu_cb->complete = true;
                } else if (pdu_cb->flags & PDUCBF_RX_DATA) {
                        pdu_cb->complete = true;
                }

                lro_cb->pdu_totallen += pdu_cb->hlen + pdu_cb->dlen;
                lro_cb->complete = true;
                lro_cb->pdu_idx++;
        }

        cxgbit_copy_frags(skb, gl, offset);

        pdu_cb->frags += gl->nfrags;
        lro_cb->offset += len;
        skb->len += len;
        skb->data_len += len;
        skb->truesize += len;
}

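/*
 * Start a new LRO skb for @csk: the headroom is zeroed because it holds
 * the LRO and per-PDU control blocks, and a reference on the socket is
 * taken while the skb points at it (dropped again in cxgbit_lro_flush()).
 */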
static struct sk_buff *
cxgbit_lro_init_skb(struct cxgbit_sock *csk, u8 op, const struct pkt_gl *gl,
                    const __be64 *rsp, struct napi_struct *napi)
{
        struct sk_buff *skb;
        struct cxgbit_lro_cb *lro_cb;

        skb = napi_alloc_skb(napi, LRO_SKB_MAX_HEADROOM);

        if (unlikely(!skb))
                return NULL;

        memset(skb->data, 0, LRO_SKB_MAX_HEADROOM);

        cxgbit_skcb_flags(skb) |= SKCBF_RX_LRO;

        lro_cb = cxgbit_skb_lro_cb(skb);

        cxgbit_get_csk(csk);

        lro_cb->csk = csk;

        return skb;
}

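/* Hand a completed LRO skb to the connection's rx queue and wake its thread. */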
static void cxgbit_queue_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        bool wakeup_thread = false;

        spin_lock(&csk->rxq.lock);
        __skb_queue_tail(&csk->rxq, skb);
        if (skb_queue_len(&csk->rxq) == 1)
                wakeup_thread = true;
        spin_unlock(&csk->rxq.lock);

        if (wakeup_thread)
                wake_up(&csk->waitq);
}

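/*
 * Close an LRO session: unlink the skb from the LRO manager, queue it to
 * the owning connection and drop the socket reference taken when the
 * session was started.
 */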
static void cxgbit_lro_flush(struct t4_lro_mgr *lro_mgr, struct sk_buff *skb)
{
        struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
        struct cxgbit_sock *csk = lro_cb->csk;

        csk->lro_skb = NULL;

        __skb_unlink(skb, &lro_mgr->lroq);
        cxgbit_queue_lro_skb(csk, skb);

        cxgbit_put_csk(csk);

        lro_mgr->lro_pkts++;
        lro_mgr->lro_session_cnt--;
}

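/* ULD lro_flush callback: flush every outstanding LRO session. */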
static void cxgbit_uld_lro_flush(struct t4_lro_mgr *lro_mgr)
{
        struct sk_buff *skb;

        while ((skb = skb_peek(&lro_mgr->lroq)))
                cxgbit_lro_flush(lro_mgr, skb);
}

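/*
 * Try to merge an incoming CPL into @csk's LRO session, opening a new
 * session if none exists (flushing all sessions first when the table is
 * full) and flushing the current one when frag, PDU or length limits
 * would be exceeded.  Returns 0 when the packet was consumed, -1 when the
 * caller must fall back to the non-LRO path.
 */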
static int
cxgbit_lro_receive(struct cxgbit_sock *csk, u8 op, const __be64 *rsp,
                   const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
                   struct napi_struct *napi)
{
        struct sk_buff *skb;
        struct cxgbit_lro_cb *lro_cb;

        if (!csk) {
                pr_err("%s: csk NULL, op 0x%x.\n", __func__, op);
                goto out;
        }

        if (csk->lro_skb)
                goto add_packet;

start_lro:
        if (lro_mgr->lro_session_cnt >= MAX_LRO_SESSIONS) {
                cxgbit_uld_lro_flush(lro_mgr);
                goto start_lro;
        }

        skb = cxgbit_lro_init_skb(csk, op, gl, rsp, napi);
        if (unlikely(!skb))
                goto out;

        csk->lro_skb = skb;

        __skb_queue_tail(&lro_mgr->lroq, skb);
        lro_mgr->lro_session_cnt++;

add_packet:
        skb = csk->lro_skb;
        lro_cb = cxgbit_skb_lro_cb(skb);

        if ((gl && (((skb_shinfo(skb)->nr_frags + gl->nfrags) >
            MAX_SKB_FRAGS) || (lro_cb->pdu_totallen >= LRO_FLUSH_LEN_MAX))) ||
            (lro_cb->pdu_idx >= MAX_SKB_FRAGS)) {
                cxgbit_lro_flush(lro_mgr, skb);
                goto start_lro;
        }

        if (gl)
                cxgbit_lro_add_packet_gl(skb, op, gl);
        else
                cxgbit_lro_add_packet_rsp(skb, op, rsp);

        lro_mgr->lro_merged++;

        return 0;

out:
        return -1;
}

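/*
 * Main ULD rx handler.  iSCSI CPLs are fed through the LRO machinery when
 * possible; everything else (and any LRO fallback) is turned into a
 * regular skb and dispatched to the per-opcode handlers in
 * cxgbit_cplhandlers[].
 */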
static int
cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
                          const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
                          struct napi_struct *napi)
{
        struct cxgbit_device *cdev = hndl;
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        struct cpl_tx_data *rpl = NULL;
        struct cxgbit_sock *csk = NULL;
        unsigned int tid = 0;
        struct sk_buff *skb;
        unsigned int op = *(u8 *)rsp;
        bool lro_flush = true;

        switch (op) {
        case CPL_ISCSI_HDR:
        case CPL_ISCSI_DATA:
        case CPL_RX_ISCSI_CMP:
        case CPL_RX_ISCSI_DDP:
        case CPL_FW4_ACK:
                lro_flush = false;
                /* fall through */
        case CPL_ABORT_RPL_RSS:
        case CPL_PASS_ESTABLISH:
        case CPL_PEER_CLOSE:
        case CPL_CLOSE_CON_RPL:
        case CPL_ABORT_REQ_RSS:
        case CPL_SET_TCB_RPL:
        case CPL_RX_DATA:
                rpl = gl ? (struct cpl_tx_data *)gl->va :
                           (struct cpl_tx_data *)(rsp + 1);
                tid = GET_TID(rpl);
                csk = lookup_tid(lldi->tids, tid);
                break;
        default:
                break;
        }

        if (csk && csk->lro_skb && lro_flush)
                cxgbit_lro_flush(lro_mgr, csk->lro_skb);

        if (!gl) {
                unsigned int len;

                if (op == CPL_RX_ISCSI_DDP) {
                        if (!cxgbit_lro_receive(csk, op, rsp, NULL, lro_mgr,
                                                napi))
                                return 0;
                }

                len = 64 - sizeof(struct rsp_ctrl) - 8;
                skb = napi_alloc_skb(napi, len);
                if (!skb)
                        goto nomem;
                __skb_put(skb, len);
                skb_copy_to_linear_data(skb, &rsp[1], len);
        } else {
                if (unlikely(op != *(u8 *)gl->va)) {
                        pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
                                gl->va, be64_to_cpu(*rsp),
                                get_unaligned_be64(gl->va),
                                gl->tot_len);
                        return 0;
                }

                if ((op == CPL_ISCSI_HDR) || (op == CPL_ISCSI_DATA) ||
                    (op == CPL_RX_ISCSI_CMP)) {
                        if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr,
                                                napi))
                                return 0;
                }

#define RX_PULL_LEN 128
                skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
                if (unlikely(!skb))
                        goto nomem;
        }

        rpl = (struct cpl_tx_data *)skb->data;
        op = rpl->ot.opcode;
        cxgbit_skcb_rx_opcode(skb) = op;

        pr_debug("cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
                 cdev, op, rpl->ot.opcode_tid,
                 ntohl(rpl->ot.opcode_tid), skb);

        if (op < NUM_CPL_CMDS && cxgbit_cplhandlers[op]) {
                cxgbit_cplhandlers[op](cdev, skb);
        } else {
                pr_err("No handler for opcode 0x%x.\n", op);
                __kfree_skb(skb);
        }
        return 0;
nomem:
        pr_err("%s OOM bailing out.\n", __func__);
        return 1;
}

#ifdef CONFIG_CHELSIO_T4_DCB
struct cxgbit_dcb_work {
        struct dcb_app_type dcb_app;
        struct work_struct work;
};

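/*
 * A DCB priority for the iSCSI application changed: walk the connections
 * on the affected port and local port number and, for each one whose
 * cached priority differs, queue an empty skb and wake its rx thread so
 * it can react to the new priority (likely by re-establishing the
 * connection with the updated priority).
 */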
static void
cxgbit_update_dcb_priority(struct cxgbit_device *cdev, u8 port_id,
                           u8 dcb_priority, u16 port_num)
{
        struct cxgbit_sock *csk;
        struct sk_buff *skb;
        u16 local_port;
        bool wakeup_thread = false;

        spin_lock_bh(&cdev->cskq.lock);
        list_for_each_entry(csk, &cdev->cskq.list, list) {
                if (csk->port_id != port_id)
                        continue;

                if (csk->com.local_addr.ss_family == AF_INET6) {
                        struct sockaddr_in6 *sock_in6;

                        sock_in6 = (struct sockaddr_in6 *)&csk->com.local_addr;
                        local_port = ntohs(sock_in6->sin6_port);
                } else {
                        struct sockaddr_in *sock_in;

                        sock_in = (struct sockaddr_in *)&csk->com.local_addr;
                        local_port = ntohs(sock_in->sin_port);
                }

                if (local_port != port_num)
                        continue;

                if (csk->dcb_priority == dcb_priority)
                        continue;

                skb = alloc_skb(0, GFP_ATOMIC);
                if (!skb)
                        continue;

                spin_lock(&csk->rxq.lock);
                __skb_queue_tail(&csk->rxq, skb);
                if (skb_queue_len(&csk->rxq) == 1)
                        wakeup_thread = true;
                spin_unlock(&csk->rxq.lock);

                if (wakeup_thread) {
                        wake_up(&csk->waitq);
                        wakeup_thread = false;
                }
        }
        spin_unlock_bh(&cdev->cskq.lock);
}

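/*
 * Deferred DCB notification handler: extract the iSCSI app priority from
 * the IEEE or CEE formatted notification, map the notifying netdev to a
 * cxgbit device and port, and push the new priority to matching
 * connections.
 */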
static void cxgbit_dcb_workfn(struct work_struct *work)
{
        struct cxgbit_dcb_work *dcb_work;
        struct net_device *ndev;
        struct cxgbit_device *cdev = NULL;
        struct dcb_app_type *iscsi_app;
        u8 priority, port_id = 0xff;

        dcb_work = container_of(work, struct cxgbit_dcb_work, work);
        iscsi_app = &dcb_work->dcb_app;

        if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
                if ((iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_STREAM) &&
                    (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY))
                        goto out;

                priority = iscsi_app->app.priority;

        } else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
                if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
                        goto out;

                if (!iscsi_app->app.priority)
                        goto out;

                priority = ffs(iscsi_app->app.priority) - 1;
        } else {
                goto out;
        }

        pr_debug("priority for ifid %d is %u\n",
                 iscsi_app->ifindex, priority);

        ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);

        if (!ndev)
                goto out;

        mutex_lock(&cdev_list_lock);
        cdev = cxgbit_find_device(ndev, &port_id);

        dev_put(ndev);

        if (!cdev) {
                mutex_unlock(&cdev_list_lock);
                goto out;
        }

        cxgbit_update_dcb_priority(cdev, port_id, priority,
                                   iscsi_app->app.protocol);
        mutex_unlock(&cdev_list_lock);
out:
        kfree(dcb_work);
}

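/*
 * DCB event notifier callback: may be invoked in atomic context (hence
 * GFP_ATOMIC), so the real work is deferred to a workqueue item carrying
 * a copy of the dcb_app data.
 */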
static int
cxgbit_dcbevent_notify(struct notifier_block *nb, unsigned long action,
                       void *data)
{
        struct cxgbit_dcb_work *dcb_work;
        struct dcb_app_type *dcb_app = data;

        dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC);
        if (!dcb_work)
                return NOTIFY_DONE;

        dcb_work->dcb_app = *dcb_app;
        INIT_WORK(&dcb_work->work, cxgbit_dcb_workfn);
        schedule_work(&dcb_work->work);
        return NOTIFY_OK;
}
#endif

static enum target_prot_op cxgbit_get_sup_prot_ops(struct iscsi_conn *conn)
{
        return TARGET_PROT_NORMAL;
}

static struct iscsit_transport cxgbit_transport = {
        .name                   = DRV_NAME,
        .transport_type         = ISCSI_CXGBIT,
        .rdma_shutdown          = false,
        .priv_size              = sizeof(struct cxgbit_cmd),
        .owner                  = THIS_MODULE,
        .iscsit_setup_np        = cxgbit_setup_np,
        .iscsit_accept_np       = cxgbit_accept_np,
        .iscsit_free_np         = cxgbit_free_np,
        .iscsit_free_conn       = cxgbit_free_conn,
        .iscsit_get_login_rx    = cxgbit_get_login_rx,
        .iscsit_put_login_tx    = cxgbit_put_login_tx,
        .iscsit_immediate_queue = iscsit_immediate_queue,
        .iscsit_response_queue  = iscsit_response_queue,
        .iscsit_get_dataout     = iscsit_build_r2ts_for_cmd,
        .iscsit_queue_data_in   = iscsit_queue_rsp,
        .iscsit_queue_status    = iscsit_queue_rsp,
        .iscsit_xmit_pdu        = cxgbit_xmit_pdu,
        .iscsit_get_r2t_ttt     = cxgbit_get_r2t_ttt,
        .iscsit_get_rx_pdu      = cxgbit_get_rx_pdu,
        .iscsit_validate_params = cxgbit_validate_params,
        .iscsit_unmap_cmd       = cxgbit_unmap_cmd,
        .iscsit_aborted_task    = iscsit_aborted_task,
        .iscsit_get_sup_prot_ops = cxgbit_get_sup_prot_ops,
};

static struct cxgb4_uld_info cxgbit_uld_info = {
        .name           = DRV_NAME,
        .nrxq           = MAX_ULD_QSETS,
        .ntxq           = MAX_ULD_QSETS,
        .rxq_size       = 1024,
        .lro            = true,
        .add            = cxgbit_uld_add,
        .state_change   = cxgbit_uld_state_change,
        .lro_rx_handler = cxgbit_uld_lro_rx_handler,
        .lro_flush      = cxgbit_uld_lro_flush,
};

#ifdef CONFIG_CHELSIO_T4_DCB
static struct notifier_block cxgbit_dcbevent_nb = {
        .notifier_call = cxgbit_dcbevent_notify,
};
#endif

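/*
 * Module init/exit: register the cxgb4 ULD and the iscsit transport (plus
 * the DCB notifier when enabled); on exit, drop any devices still on the
 * cdev list and unregister both interfaces.
 */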
static int __init cxgbit_init(void)
{
        cxgb4_register_uld(CXGB4_ULD_ISCSIT, &cxgbit_uld_info);
        iscsit_register_transport(&cxgbit_transport);

#ifdef CONFIG_CHELSIO_T4_DCB
        pr_info("%s dcb enabled.\n", DRV_NAME);
        register_dcbevent_notifier(&cxgbit_dcbevent_nb);
#endif
        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
                     sizeof(union cxgbit_skb_cb));
        return 0;
}

static void __exit cxgbit_exit(void)
{
        struct cxgbit_device *cdev, *tmp;

#ifdef CONFIG_CHELSIO_T4_DCB
        unregister_dcbevent_notifier(&cxgbit_dcbevent_nb);
#endif
        mutex_lock(&cdev_list_lock);
        list_for_each_entry_safe(cdev, tmp, &cdev_list_head, list) {
                list_del(&cdev->list);
                cxgbit_put_cdev(cdev);
        }
        mutex_unlock(&cdev_list_lock);
        iscsit_unregister_transport(&cxgbit_transport);
        cxgb4_unregister_uld(CXGB4_ULD_ISCSIT);
}

module_init(cxgbit_init);
module_exit(cxgbit_exit);

MODULE_DESCRIPTION("Chelsio iSCSI target offload driver");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");