/* drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "t7xx_dpmaif.h"
#include "t7xx_hif_dpmaif.h"
#include "t7xx_hif_dpmaif_rx.h"
#include "t7xx_hif_dpmaif_tx.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_state_monitor.h"

  35unsigned int t7xx_ring_buf_get_next_wr_idx(unsigned int buf_len, unsigned int buf_idx)
  36{
  37        buf_idx++;
  38
  39        return buf_idx < buf_len ? buf_idx : 0;
  40}
  41
  42unsigned int t7xx_ring_buf_rd_wr_count(unsigned int total_cnt, unsigned int rd_idx,
  43                                       unsigned int wr_idx, enum dpmaif_rdwr rd_wr)
  44{
  45        int pkt_cnt;
  46
  47        if (rd_wr == DPMAIF_READ)
  48                pkt_cnt = wr_idx - rd_idx;
  49        else
  50                pkt_cnt = rd_idx - wr_idx - 1;
  51
  52        if (pkt_cnt < 0)
  53                pkt_cnt += total_cnt;
  54
  55        return (unsigned int)pkt_cnt;
  56}
  57
  58static void t7xx_dpmaif_enable_irq(struct dpmaif_ctrl *dpmaif_ctrl)
  59{
  60        struct dpmaif_isr_para *isr_para;
  61        int i;
  62
  63        for (i = 0; i < ARRAY_SIZE(dpmaif_ctrl->isr_para); i++) {
  64                isr_para = &dpmaif_ctrl->isr_para[i];
  65                t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
  66        }
  67}
  68
  69static void t7xx_dpmaif_disable_irq(struct dpmaif_ctrl *dpmaif_ctrl)
  70{
  71        struct dpmaif_isr_para *isr_para;
  72        int i;
  73
  74        for (i = 0; i < ARRAY_SIZE(dpmaif_ctrl->isr_para); i++) {
  75                isr_para = &dpmaif_ctrl->isr_para[i];
  76                t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
  77        }
  78}
  79
/* Service all pending DPMAIF HW interrupt events for one ISR parameter set.
 *
 * Latches the pending interrupt types/queues for @isr_para->dlq_id, acks the
 * PCIe-level interrupt status, then dispatches each event: UL/DL completions
 * go to the TX/RX paths, count-length errors are logged (rate limited) and
 * their interrupt sources unmasked so they can fire again.
 */
static void t7xx_dpmaif_irq_cb(struct dpmaif_isr_para *isr_para)
{
	struct dpmaif_ctrl *dpmaif_ctrl = isr_para->dpmaif_ctrl;
	struct dpmaif_hw_intr_st_para intr_status;
	struct device *dev = dpmaif_ctrl->dev;
	struct dpmaif_hw_info *hw_info;
	int i;

	memset(&intr_status, 0, sizeof(intr_status));
	hw_info = &dpmaif_ctrl->hw_info;

	/* Collect pending events first; bail out without acking on failure. */
	if (t7xx_dpmaif_hw_get_intr_cnt(hw_info, &intr_status, isr_para->dlq_id) < 0) {
		dev_err(dev, "Failed to get HW interrupt count\n");
		return;
	}

	/* Ack at the PCIe MAC level before handling the individual events. */
	t7xx_pcie_mac_clear_int_status(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);

	for (i = 0; i < intr_status.intr_cnt; i++) {
		switch (intr_status.intr_types[i]) {
		case DPF_INTR_UL_DONE:
			t7xx_dpmaif_irq_tx_done(dpmaif_ctrl, intr_status.intr_queues[i]);
			break;

		case DPF_INTR_UL_DRB_EMPTY:
		case DPF_INTR_UL_MD_NOTREADY:
		case DPF_INTR_UL_MD_PWR_NOTREADY:
			/* No need to log an error for these */
			break;

		case DPF_INTR_DL_BATCNT_LEN_ERR:
			dev_err_ratelimited(dev, "DL interrupt: packet BAT count length error\n");
			t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(hw_info);
			break;

		case DPF_INTR_DL_PITCNT_LEN_ERR:
			dev_err_ratelimited(dev, "DL interrupt: PIT count length error\n");
			t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(hw_info);
			break;

		case DPF_INTR_DL_Q0_PITCNT_LEN_ERR:
			dev_err_ratelimited(dev, "DL interrupt: DLQ0 PIT count length error\n");
			t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(hw_info, DPF_RX_QNO_DFT);
			break;

		case DPF_INTR_DL_Q1_PITCNT_LEN_ERR:
			dev_err_ratelimited(dev, "DL interrupt: DLQ1 PIT count length error\n");
			t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(hw_info, DPF_RX_QNO1);
			break;

		case DPF_INTR_DL_DONE:
		case DPF_INTR_DL_Q0_DONE:
		case DPF_INTR_DL_Q1_DONE:
			t7xx_dpmaif_irq_rx_done(dpmaif_ctrl, intr_status.intr_queues[i]);
			break;

		default:
			dev_err_ratelimited(dev, "DL interrupt error: unknown type : %d\n",
					    intr_status.intr_types[i]);
		}
	}
}

 143static irqreturn_t t7xx_dpmaif_isr_handler(int irq, void *data)
 144{
 145        struct dpmaif_isr_para *isr_para = data;
 146        struct dpmaif_ctrl *dpmaif_ctrl;
 147
 148        dpmaif_ctrl = isr_para->dpmaif_ctrl;
 149        if (dpmaif_ctrl->state != DPMAIF_STATE_PWRON) {
 150                dev_err(dpmaif_ctrl->dev, "Interrupt received before initializing DPMAIF\n");
 151                return IRQ_HANDLED;
 152        }
 153
 154        t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
 155        t7xx_dpmaif_irq_cb(isr_para);
 156        t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
 157        return IRQ_HANDLED;
 158}
 159
 160static void t7xx_dpmaif_isr_parameter_init(struct dpmaif_ctrl *dpmaif_ctrl)
 161{
 162        struct dpmaif_isr_para *isr_para;
 163        unsigned char i;
 164
 165        dpmaif_ctrl->rxq_int_mapping[DPF_RX_QNO0] = DPMAIF_INT;
 166        dpmaif_ctrl->rxq_int_mapping[DPF_RX_QNO1] = DPMAIF2_INT;
 167
 168        for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
 169                isr_para = &dpmaif_ctrl->isr_para[i];
 170                isr_para->dpmaif_ctrl = dpmaif_ctrl;
 171                isr_para->dlq_id = i;
 172                isr_para->pcie_int = dpmaif_ctrl->rxq_int_mapping[i];
 173        }
 174}
 175
 176static void t7xx_dpmaif_register_pcie_irq(struct dpmaif_ctrl *dpmaif_ctrl)
 177{
 178        struct t7xx_pci_dev *t7xx_dev = dpmaif_ctrl->t7xx_dev;
 179        struct dpmaif_isr_para *isr_para;
 180        enum t7xx_int int_type;
 181        int i;
 182
 183        t7xx_dpmaif_isr_parameter_init(dpmaif_ctrl);
 184
 185        for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
 186                isr_para = &dpmaif_ctrl->isr_para[i];
 187                int_type = isr_para->pcie_int;
 188                t7xx_pcie_mac_clear_int(t7xx_dev, int_type);
 189
 190                t7xx_dev->intr_handler[int_type] = t7xx_dpmaif_isr_handler;
 191                t7xx_dev->intr_thread[int_type] = NULL;
 192                t7xx_dev->callback_param[int_type] = isr_para;
 193
 194                t7xx_pcie_mac_clear_int_status(t7xx_dev, int_type);
 195                t7xx_pcie_mac_set_int(t7xx_dev, int_type);
 196        }
 197}
 198
/* Allocate all DPMAIF software resources: BAT tables (normal + frag), RX and
 * TX queue structures, the TX thread and the BAT release workqueue.
 *
 * On failure, unwinds exactly what was allocated: the goto ladder relies on
 * rx_idx/tx_idx holding the number of queues successfully initialized, so the
 * cleanup loops below only free the queues that were set up.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int t7xx_dpmaif_rxtx_sw_allocs(struct dpmaif_ctrl *dpmaif_ctrl)
{
	struct dpmaif_rx_queue *rx_q;
	struct dpmaif_tx_queue *tx_q;
	int ret, rx_idx, tx_idx, i;

	ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, BAT_TYPE_NORMAL);
	if (ret) {
		dev_err(dpmaif_ctrl->dev, "Failed to allocate normal BAT table: %d\n", ret);
		return ret;
	}

	ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag, BAT_TYPE_FRAG);
	if (ret) {
		dev_err(dpmaif_ctrl->dev, "Failed to allocate frag BAT table: %d\n", ret);
		goto err_free_normal_bat;
	}

	for (rx_idx = 0; rx_idx < DPMAIF_RXQ_NUM; rx_idx++) {
		rx_q = &dpmaif_ctrl->rxq[rx_idx];
		rx_q->index = rx_idx;
		rx_q->dpmaif_ctrl = dpmaif_ctrl;
		ret = t7xx_dpmaif_rxq_init(rx_q);
		if (ret)
			goto err_free_rxq;
	}

	for (tx_idx = 0; tx_idx < DPMAIF_TXQ_NUM; tx_idx++) {
		tx_q = &dpmaif_ctrl->txq[tx_idx];
		tx_q->index = tx_idx;
		tx_q->dpmaif_ctrl = dpmaif_ctrl;
		ret = t7xx_dpmaif_txq_init(tx_q);
		if (ret)
			goto err_free_txq;
	}

	ret = t7xx_dpmaif_tx_thread_init(dpmaif_ctrl);
	if (ret) {
		dev_err(dpmaif_ctrl->dev, "Failed to start TX thread\n");
		goto err_free_txq;
	}

	ret = t7xx_dpmaif_bat_rel_wq_alloc(dpmaif_ctrl);
	if (ret)
		goto err_thread_rel;

	return 0;

err_thread_rel:
	t7xx_dpmaif_tx_thread_rel(dpmaif_ctrl);

err_free_txq:
	/* Free only the TX queues that were initialized (i < tx_idx). */
	for (i = 0; i < tx_idx; i++) {
		tx_q = &dpmaif_ctrl->txq[i];
		t7xx_dpmaif_txq_free(tx_q);
	}

err_free_rxq:
	/* Free only the RX queues that were initialized (i < rx_idx). */
	for (i = 0; i < rx_idx; i++) {
		rx_q = &dpmaif_ctrl->rxq[i];
		t7xx_dpmaif_rxq_free(rx_q);
	}

	t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_frag);

err_free_normal_bat:
	t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_req);

	return ret;
}

 270static void t7xx_dpmaif_sw_release(struct dpmaif_ctrl *dpmaif_ctrl)
 271{
 272        struct dpmaif_rx_queue *rx_q;
 273        struct dpmaif_tx_queue *tx_q;
 274        int i;
 275
 276        t7xx_dpmaif_tx_thread_rel(dpmaif_ctrl);
 277        t7xx_dpmaif_bat_wq_rel(dpmaif_ctrl);
 278
 279        for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
 280                tx_q = &dpmaif_ctrl->txq[i];
 281                t7xx_dpmaif_txq_free(tx_q);
 282        }
 283
 284        for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
 285                rx_q = &dpmaif_ctrl->rxq[i];
 286                t7xx_dpmaif_rxq_free(rx_q);
 287        }
 288}
 289
/* Bring the DPMAIF data path up: populate the HW init parameters from the
 * queue state, pre-fill the RX BAT/frag buffers, program the HW, and finally
 * enable interrupts and wake the TX thread.
 *
 * NOTE(review): after the first loop, @rxq points at the last RX queue; the
 * error paths and the snd_hw_*_cnt calls use it to reach bat_req/bat_frag —
 * presumably these are shared across all RX queues (confirm against the
 * queue init code).
 *
 * Return: 0 on success, -EFAULT if already powered on, or a negative error
 * code from buffer allocation / HW init.
 */
static int t7xx_dpmaif_start(struct dpmaif_ctrl *dpmaif_ctrl)
{
	struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info;
	struct dpmaif_hw_params hw_init_para;
	struct dpmaif_rx_queue *rxq;
	struct dpmaif_tx_queue *txq;
	unsigned int buf_cnt;
	int i, ret = 0;

	if (dpmaif_ctrl->state == DPMAIF_STATE_PWRON)
		return -EFAULT;

	memset(&hw_init_para, 0, sizeof(hw_init_para));

	/* Gather per-RX-queue BAT/PIT/frag ring addresses and sizes for HW init. */
	for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
		rxq = &dpmaif_ctrl->rxq[i];
		rxq->que_started = true;
		rxq->index = i;
		rxq->budget = rxq->bat_req->bat_size_cnt - 1;

		hw_init_para.pkt_bat_base_addr[i] = rxq->bat_req->bat_bus_addr;
		hw_init_para.pkt_bat_size_cnt[i] = rxq->bat_req->bat_size_cnt;
		hw_init_para.pit_base_addr[i] = rxq->pit_bus_addr;
		hw_init_para.pit_size_cnt[i] = rxq->pit_size_cnt;
		hw_init_para.frg_bat_base_addr[i] = rxq->bat_frag->bat_bus_addr;
		hw_init_para.frg_bat_size_cnt[i] = rxq->bat_frag->bat_size_cnt;
	}

	/* Pre-fill the normal BAT ring, leaving one slot free. */
	bitmap_zero(dpmaif_ctrl->bat_req.bat_bitmap, dpmaif_ctrl->bat_req.bat_size_cnt);
	buf_cnt = dpmaif_ctrl->bat_req.bat_size_cnt - 1;
	ret = t7xx_dpmaif_rx_buf_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, 0, buf_cnt, true);
	if (ret) {
		dev_err(dpmaif_ctrl->dev, "Failed to allocate RX buffer: %d\n", ret);
		return ret;
	}

	/* Pre-fill the fragment BAT ring, also keeping one slot free. */
	buf_cnt = dpmaif_ctrl->bat_frag.bat_size_cnt - 1;
	ret = t7xx_dpmaif_rx_frag_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag, buf_cnt, true);
	if (ret) {
		dev_err(dpmaif_ctrl->dev, "Failed to allocate frag RX buffer: %d\n", ret);
		goto err_free_normal_bat;
	}

	/* Gather per-TX-queue DRB ring addresses and sizes. */
	for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
		txq = &dpmaif_ctrl->txq[i];
		txq->que_started = true;

		hw_init_para.drb_base_addr[i] = txq->drb_bus_addr;
		hw_init_para.drb_size_cnt[i] = txq->drb_size_cnt;
	}

	ret = t7xx_dpmaif_hw_init(hw_info, &hw_init_para);
	if (ret) {
		dev_err(dpmaif_ctrl->dev, "Failed to initialize DPMAIF HW: %d\n", ret);
		goto err_free_frag_bat;
	}

	/* Tell the HW how many BAT/frag entries were just made available. */
	ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(hw_info, rxq->bat_req->bat_size_cnt - 1);
	if (ret)
		goto err_free_frag_bat;

	ret = t7xx_dpmaif_dl_snd_hw_frg_cnt(hw_info, rxq->bat_frag->bat_size_cnt - 1);
	if (ret)
		goto err_free_frag_bat;

	t7xx_dpmaif_ul_clr_all_intr(hw_info);
	t7xx_dpmaif_dl_clr_all_intr(hw_info);
	dpmaif_ctrl->state = DPMAIF_STATE_PWRON;
	t7xx_dpmaif_enable_irq(dpmaif_ctrl);
	/* TX thread may be waiting for the data path to come up. */
	wake_up(&dpmaif_ctrl->tx_wq);
	return 0;

err_free_frag_bat:
	t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag);

err_free_normal_bat:
	t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req);

	return ret;
}

 371static void t7xx_dpmaif_stop_sw(struct dpmaif_ctrl *dpmaif_ctrl)
 372{
 373        t7xx_dpmaif_tx_stop(dpmaif_ctrl);
 374        t7xx_dpmaif_rx_stop(dpmaif_ctrl);
 375}
 376
 377static void t7xx_dpmaif_stop_hw(struct dpmaif_ctrl *dpmaif_ctrl)
 378{
 379        t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info);
 380        t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info);
 381}
 382
 383static int t7xx_dpmaif_stop(struct dpmaif_ctrl *dpmaif_ctrl)
 384{
 385        if (!dpmaif_ctrl->dpmaif_sw_init_done) {
 386                dev_err(dpmaif_ctrl->dev, "dpmaif SW init fail\n");
 387                return -EFAULT;
 388        }
 389
 390        if (dpmaif_ctrl->state == DPMAIF_STATE_PWROFF)
 391                return -EFAULT;
 392
 393        t7xx_dpmaif_disable_irq(dpmaif_ctrl);
 394        dpmaif_ctrl->state = DPMAIF_STATE_PWROFF;
 395        t7xx_dpmaif_stop_sw(dpmaif_ctrl);
 396        t7xx_dpmaif_tx_clear(dpmaif_ctrl);
 397        t7xx_dpmaif_rx_clear(dpmaif_ctrl);
 398        return 0;
 399}
 400
 401static int t7xx_dpmaif_suspend(struct t7xx_pci_dev *t7xx_dev, void *param)
 402{
 403        struct dpmaif_ctrl *dpmaif_ctrl = param;
 404
 405        t7xx_dpmaif_tx_stop(dpmaif_ctrl);
 406        t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info);
 407        t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info);
 408        t7xx_dpmaif_disable_irq(dpmaif_ctrl);
 409        t7xx_dpmaif_rx_stop(dpmaif_ctrl);
 410        return 0;
 411}
 412
 413static void t7xx_dpmaif_unmask_dlq_intr(struct dpmaif_ctrl *dpmaif_ctrl)
 414{
 415        int qno;
 416
 417        for (qno = 0; qno < DPMAIF_RXQ_NUM; qno++)
 418                t7xx_dpmaif_dlq_unmask_rx_done(&dpmaif_ctrl->hw_info, qno);
 419}
 420
 421static void t7xx_dpmaif_start_txrx_qs(struct dpmaif_ctrl *dpmaif_ctrl)
 422{
 423        struct dpmaif_rx_queue *rxq;
 424        struct dpmaif_tx_queue *txq;
 425        unsigned int que_cnt;
 426
 427        for (que_cnt = 0; que_cnt < DPMAIF_TXQ_NUM; que_cnt++) {
 428                txq = &dpmaif_ctrl->txq[que_cnt];
 429                txq->que_started = true;
 430        }
 431
 432        for (que_cnt = 0; que_cnt < DPMAIF_RXQ_NUM; que_cnt++) {
 433                rxq = &dpmaif_ctrl->rxq[que_cnt];
 434                rxq->que_started = true;
 435        }
 436}
 437
 438static int t7xx_dpmaif_resume(struct t7xx_pci_dev *t7xx_dev, void *param)
 439{
 440        struct dpmaif_ctrl *dpmaif_ctrl = param;
 441
 442        if (!dpmaif_ctrl)
 443                return 0;
 444
 445        t7xx_dpmaif_start_txrx_qs(dpmaif_ctrl);
 446        t7xx_dpmaif_enable_irq(dpmaif_ctrl);
 447        t7xx_dpmaif_unmask_dlq_intr(dpmaif_ctrl);
 448        t7xx_dpmaif_start_hw(&dpmaif_ctrl->hw_info);
 449        wake_up(&dpmaif_ctrl->tx_wq);
 450        return 0;
 451}
 452
 453static int t7xx_dpmaif_pm_entity_init(struct dpmaif_ctrl *dpmaif_ctrl)
 454{
 455        struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity;
 456        int ret;
 457
 458        INIT_LIST_HEAD(&dpmaif_pm_entity->entity);
 459        dpmaif_pm_entity->suspend = &t7xx_dpmaif_suspend;
 460        dpmaif_pm_entity->suspend_late = NULL;
 461        dpmaif_pm_entity->resume_early = NULL;
 462        dpmaif_pm_entity->resume = &t7xx_dpmaif_resume;
 463        dpmaif_pm_entity->id = PM_ENTITY_ID_DATA;
 464        dpmaif_pm_entity->entity_param = dpmaif_ctrl;
 465
 466        ret = t7xx_pci_pm_entity_register(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity);
 467        if (ret)
 468                dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n");
 469
 470        return ret;
 471}
 472
 473static int t7xx_dpmaif_pm_entity_release(struct dpmaif_ctrl *dpmaif_ctrl)
 474{
 475        struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity;
 476        int ret;
 477
 478        ret = t7xx_pci_pm_entity_unregister(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity);
 479        if (ret < 0)
 480                dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n");
 481
 482        return ret;
 483}
 484
 485int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state)
 486{
 487        int ret = 0;
 488
 489        switch (state) {
 490        case MD_STATE_WAITING_FOR_HS1:
 491                ret = t7xx_dpmaif_start(dpmaif_ctrl);
 492                break;
 493
 494        case MD_STATE_EXCEPTION:
 495                ret = t7xx_dpmaif_stop(dpmaif_ctrl);
 496                break;
 497
 498        case MD_STATE_STOPPED:
 499                ret = t7xx_dpmaif_stop(dpmaif_ctrl);
 500                break;
 501
 502        case MD_STATE_WAITING_TO_STOP:
 503                t7xx_dpmaif_stop_hw(dpmaif_ctrl);
 504                break;
 505
 506        default:
 507                break;
 508        }
 509
 510        return ret;
 511}
 512
/**
 * t7xx_dpmaif_hif_init() - Initialize data path.
 * @t7xx_dev: MTK context structure.
 * @callbacks: Callbacks implemented by the network layer to handle RX skb and
 *             event notifications.
 *
 * Allocate and initialize datapath control block.
 * Register datapath ISR, TX and RX resources.
 *
 * Return:
 * * dpmaif_ctrl pointer - Pointer to DPMAIF context structure.
 * * NULL                - In case of error.
 */
 526struct dpmaif_ctrl *t7xx_dpmaif_hif_init(struct t7xx_pci_dev *t7xx_dev,
 527                                         struct dpmaif_callbacks *callbacks)
 528{
 529        struct device *dev = &t7xx_dev->pdev->dev;
 530        struct dpmaif_ctrl *dpmaif_ctrl;
 531        int ret;
 532
 533        if (!callbacks)
 534                return NULL;
 535
 536        dpmaif_ctrl = devm_kzalloc(dev, sizeof(*dpmaif_ctrl), GFP_KERNEL);
 537        if (!dpmaif_ctrl)
 538                return NULL;
 539
 540        dpmaif_ctrl->t7xx_dev = t7xx_dev;
 541        dpmaif_ctrl->callbacks = callbacks;
 542        dpmaif_ctrl->dev = dev;
 543        dpmaif_ctrl->dpmaif_sw_init_done = false;
 544        dpmaif_ctrl->hw_info.dev = dev;
 545        dpmaif_ctrl->hw_info.pcie_base = t7xx_dev->base_addr.pcie_ext_reg_base -
 546                                         t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
 547
 548        ret = t7xx_dpmaif_pm_entity_init(dpmaif_ctrl);
 549        if (ret)
 550                return NULL;
 551
 552        t7xx_dpmaif_register_pcie_irq(dpmaif_ctrl);
 553        t7xx_dpmaif_disable_irq(dpmaif_ctrl);
 554
 555        ret = t7xx_dpmaif_rxtx_sw_allocs(dpmaif_ctrl);
 556        if (ret) {
 557                t7xx_dpmaif_pm_entity_release(dpmaif_ctrl);
 558                dev_err(dev, "Failed to allocate RX/TX SW resources: %d\n", ret);
 559                return NULL;
 560        }
 561
 562        dpmaif_ctrl->dpmaif_sw_init_done = true;
 563        return dpmaif_ctrl;
 564}
 565
 566void t7xx_dpmaif_hif_exit(struct dpmaif_ctrl *dpmaif_ctrl)
 567{
 568        if (dpmaif_ctrl->dpmaif_sw_init_done) {
 569                t7xx_dpmaif_stop(dpmaif_ctrl);
 570                t7xx_dpmaif_pm_entity_release(dpmaif_ctrl);
 571                t7xx_dpmaif_sw_release(dpmaif_ctrl);
 572                dpmaif_ctrl->dpmaif_sw_init_done = false;
 573        }
 574}
 575