linux/drivers/net/wireless/ath/ath11k/dp.c
   1// SPDX-License-Identifier: BSD-3-Clause-Clear
   2/*
   3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
   4 */
   5
   6#include "core.h"
   7#include "dp_tx.h"
   8#include "hal_tx.h"
   9#include "debug.h"
  10#include "dp_rx.h"
  11#include "peer.h"
  12
  13static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
  14                                          struct sk_buff *skb)
  15{
  16        dev_kfree_skb_any(skb);
  17}
  18
  19void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
  20{
  21        struct ath11k_base *ab = ar->ab;
  22        struct ath11k_peer *peer;
  23
  24        /* TODO: Any other peer specific DP cleanup */
  25
  26        spin_lock_bh(&ab->base_lock);
  27        peer = ath11k_peer_find(ab, vdev_id, addr);
  28        if (!peer) {
  29                ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
  30                            addr, vdev_id);
  31                spin_unlock_bh(&ab->base_lock);
  32                return;
  33        }
  34
  35        ath11k_peer_rx_tid_cleanup(ar, peer);
  36        spin_unlock_bh(&ab->base_lock);
  37}
  38
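     /*
      * Per-peer data path setup: program the peer's default REO routing
      * and create its rx TID (reorder) queues.  The routing value packs
      * the hash-enable flag into bit 0 and the REO destination ring into
      * the bits above it, so for mac_id 0 the value sent to firmware is
      * DP_RX_HASH_ENABLE | (1 << 1).  Only the non-QoS TID and TID 0
      * queues are created here.
      */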
  39int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
  40{
  41        struct ath11k_base *ab = ar->ab;
  42        u32 reo_dest;
  43        int ret;
  44
  45        /* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
  46        reo_dest = ar->dp.mac_id + 1;
  47        ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
  48                                        WMI_PEER_SET_DEFAULT_ROUTING,
  49                                        DP_RX_HASH_ENABLE | (reo_dest << 1));
  50
  51        if (ret) {
  52                ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
  53                            ret, addr, vdev_id);
  54                return ret;
  55        }
  56
  57        ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id,
  58                                       HAL_DESC_REO_NON_QOS_TID, 1, 0);
  59        if (ret) {
  60                ath11k_warn(ab, "failed to setup rxd tid queue for non-qos tid %d\n",
  61                            ret);
  62                return ret;
  63        }
  64
  65        ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, 0, 1, 0);
  66        if (ret) {
  67                ath11k_warn(ab, "failed to setup rxd tid queue for tid 0 %d\n",
  68                            ret);
  69                return ret;
  70        }
  71
   72        /* TODO: Setup other peer specific resources used in the data path */
  73
  74        return 0;
  75}
  76
  77void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
  78{
  79        if (!ring->vaddr_unaligned)
  80                return;
  81
  82        dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
  83                          ring->paddr_unaligned);
  84
  85        ring->vaddr_unaligned = NULL;
  86}
  87
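     /*
      * Generic SRNG setup helper.  num_entries is clamped to the HAL
      * maximum for the ring type, the backing memory is over-allocated
      * by HAL_RING_BASE_ALIGN - 1 bytes so the base address can be
      * aligned with PTR_ALIGN(), and the aligned physical address is
      * derived by applying the same offset to the unaligned DMA address.
      * The switch below only chooses per-ring-type interrupt moderation
      * parameters (batch counter and timer thresholds, plus a low
      * threshold for the RXDMA buffer rings) before handing the ring to
      * ath11k_hal_srng_setup(), which returns the ring id.
      */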
  88int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
  89                         enum hal_ring_type type, int ring_num,
  90                         int mac_id, int num_entries)
  91{
  92        struct hal_srng_params params = { 0 };
  93        int entry_sz = ath11k_hal_srng_get_entrysize(type);
  94        int max_entries = ath11k_hal_srng_get_max_entries(type);
  95        int ret;
  96
  97        if (max_entries < 0 || entry_sz < 0)
  98                return -EINVAL;
  99
 100        if (num_entries > max_entries)
 101                num_entries = max_entries;
 102
 103        ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
 104        ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
 105                                                   &ring->paddr_unaligned,
 106                                                   GFP_KERNEL);
 107        if (!ring->vaddr_unaligned)
 108                return -ENOMEM;
 109
 110        ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
 111        ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
 112                      (unsigned long)ring->vaddr_unaligned);
 113
 114        params.ring_base_vaddr = ring->vaddr;
 115        params.ring_base_paddr = ring->paddr;
 116        params.num_entries = num_entries;
 117
 118        switch (type) {
 119        case HAL_REO_DST:
 120                params.intr_batch_cntr_thres_entries =
 121                                        HAL_SRNG_INT_BATCH_THRESHOLD_RX;
 122                params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
 123                break;
 124        case HAL_RXDMA_BUF:
 125        case HAL_RXDMA_MONITOR_BUF:
 126        case HAL_RXDMA_MONITOR_STATUS:
 127                params.low_threshold = num_entries >> 3;
 128                params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
 129                params.intr_batch_cntr_thres_entries = 0;
 130                params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
 131                break;
 132        case HAL_WBM2SW_RELEASE:
 133                if (ring_num < 3) {
 134                        params.intr_batch_cntr_thres_entries =
 135                                        HAL_SRNG_INT_BATCH_THRESHOLD_TX;
 136                        params.intr_timer_thres_us =
 137                                        HAL_SRNG_INT_TIMER_THRESHOLD_TX;
 138                        break;
 139                }
  140                /* fall through when ring_num >= 3 */
 141                /* fall through */
 142        case HAL_REO_EXCEPTION:
 143        case HAL_REO_REINJECT:
 144        case HAL_REO_CMD:
 145        case HAL_REO_STATUS:
 146        case HAL_TCL_DATA:
 147        case HAL_TCL_CMD:
 148        case HAL_TCL_STATUS:
 149        case HAL_WBM_IDLE_LINK:
 150        case HAL_SW2WBM_RELEASE:
 151        case HAL_RXDMA_DST:
 152        case HAL_RXDMA_MONITOR_DST:
 153        case HAL_RXDMA_MONITOR_DESC:
 154        case HAL_RXDMA_DIR_BUF:
 155                params.intr_batch_cntr_thres_entries =
 156                                        HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
 157                params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
 158                break;
 159        default:
 160                ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
 161                return -EINVAL;
 162        }
 163
 164        ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
 165        if (ret < 0) {
 166                ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
 167                            ret, ring_num);
 168                return ret;
 169        }
 170
 171        ring->ring_id = ret;
 172
 173        return 0;
 174}
 175
 176static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
 177{
 178        struct ath11k_dp *dp = &ab->dp;
 179        int i;
 180
 181        ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
 182        ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
 183        ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
 184        for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
 185                ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
 186                ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
 187        }
 188        ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
 189        ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
 190        ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
 191        ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
 192        ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
 193}
 194
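     /*
      * Set up the SoC-level rings shared by all pdevs: the SW2WBM
      * descriptor release ring, the TCL command/status rings, one TCL
      * data ring plus one WBM2SW completion ring per TCL ring, and the
      * REO reinject/exception/command/status rings.  Rx release frames
      * are taken from WBM2SW ring number 3, which is presumably why
      * ath11k_dp_srng_setup() applies the tx interrupt thresholds only
      * for WBM2SW ring numbers below 3.  The TCL data rings and the REO
      * command ring also get their HAL-side SW state initialized here,
      * and ath11k_hal_reo_hw_setup() finishes the REO configuration.
      */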
 195static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
 196{
 197        struct ath11k_dp *dp = &ab->dp;
 198        struct hal_srng *srng;
 199        int i, ret;
 200
 201        ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
 202                                   HAL_SW2WBM_RELEASE, 0, 0,
 203                                   DP_WBM_RELEASE_RING_SIZE);
 204        if (ret) {
 205                ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
 206                            ret);
 207                goto err;
 208        }
 209
 210        ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
 211                                   DP_TCL_CMD_RING_SIZE);
 212        if (ret) {
 213                ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
 214                goto err;
 215        }
 216
 217        ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
 218                                   0, 0, DP_TCL_STATUS_RING_SIZE);
 219        if (ret) {
 220                ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
 221                goto err;
 222        }
 223
 224        for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
 225                ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
 226                                           HAL_TCL_DATA, i, 0,
 227                                           DP_TCL_DATA_RING_SIZE);
 228                if (ret) {
 229                        ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
 230                                    i, ret);
 231                        goto err;
 232                }
 233
 234                ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
 235                                           HAL_WBM2SW_RELEASE, i, 0,
 236                                           DP_TX_COMP_RING_SIZE);
 237                if (ret) {
 238                        ath11k_warn(ab, "failed to set up tcl_comp ring ring (%d) :%d\n",
 239                                    i, ret);
 240                        goto err;
 241                }
 242
 243                srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
 244                ath11k_hal_tx_init_data_ring(ab, srng);
 245        }
 246
 247        ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
 248                                   0, 0, DP_REO_REINJECT_RING_SIZE);
 249        if (ret) {
 250                ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
 251                            ret);
 252                goto err;
 253        }
 254
 255        ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
 256                                   3, 0, DP_RX_RELEASE_RING_SIZE);
 257        if (ret) {
 258                ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
 259                goto err;
 260        }
 261
 262        ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
 263                                   0, 0, DP_REO_EXCEPTION_RING_SIZE);
 264        if (ret) {
 265                ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
 266                            ret);
 267                goto err;
 268        }
 269
 270        ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
 271                                   0, 0, DP_REO_CMD_RING_SIZE);
 272        if (ret) {
 273                ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
 274                goto err;
 275        }
 276
 277        srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
 278        ath11k_hal_reo_init_cmd_ring(ab, srng);
 279
 280        ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
 281                                   0, 0, DP_REO_STATUS_RING_SIZE);
 282        if (ret) {
 283                ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
 284                goto err;
 285        }
 286
 287        ath11k_hal_reo_hw_setup(ab);
 288
 289        return 0;
 290
 291err:
 292        ath11k_dp_srng_common_cleanup(ab);
 293
 294        return ret;
 295}
 296
 297static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
 298{
 299        struct ath11k_dp *dp = &ab->dp;
 300        struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
 301        int i;
 302
 303        for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
 304                if (!slist[i].vaddr)
 305                        continue;
 306
 307                dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
 308                                  slist[i].vaddr, slist[i].paddr);
 309                slist[i].vaddr = NULL;
 310        }
 311}
 312
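     /*
      * When the idle link descriptor list does not fit in a single
      * allocation (see ath11k_dp_link_desc_setup()), the descriptors are
      * published to the hardware through a set of scatter buffers
      * instead of directly through the WBM idle link ring.  Each scatter
      * buffer is HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX bytes and holds
      * HAL_WBM_IDLE_SCATTER_BUF_SIZE / entry-size link descriptors; the
      * loop below walks every link descriptor bank, writes one entry per
      * HAL_LINK_DESC_SIZE chunk and moves on to the next scatter buffer
      * when the current one is full.  The fill level of the last buffer
      * is reported to the HAL as end_offset.
      */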
 313static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
 314                                                  int size,
 315                                                  u32 n_link_desc_bank,
 316                                                  u32 n_link_desc,
 317                                                  u32 last_bank_sz)
 318{
 319        struct ath11k_dp *dp = &ab->dp;
 320        struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
 321        struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
 322        u32 n_entries_per_buf;
 323        int num_scatter_buf, scatter_idx;
 324        struct hal_wbm_link_desc *scatter_buf;
 325        int align_bytes, n_entries;
 326        dma_addr_t paddr;
 327        int rem_entries;
 328        int i;
 329        int ret = 0;
 330        u32 end_offset;
 331
 332        n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
 333                            ath11k_hal_srng_get_entrysize(HAL_WBM_IDLE_LINK);
 334        num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
 335
 336        if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
 337                return -EINVAL;
 338
 339        for (i = 0; i < num_scatter_buf; i++) {
 340                slist[i].vaddr = dma_alloc_coherent(ab->dev,
 341                                                    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
 342                                                    &slist[i].paddr, GFP_KERNEL);
 343                if (!slist[i].vaddr) {
 344                        ret = -ENOMEM;
 345                        goto err;
 346                }
 347        }
 348
 349        scatter_idx = 0;
 350        scatter_buf = slist[scatter_idx].vaddr;
 351        rem_entries = n_entries_per_buf;
 352
 353        for (i = 0; i < n_link_desc_bank; i++) {
 354                align_bytes = link_desc_banks[i].vaddr -
 355                              link_desc_banks[i].vaddr_unaligned;
 356                n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
 357                             HAL_LINK_DESC_SIZE;
 358                paddr = link_desc_banks[i].paddr;
 359                while (n_entries) {
 360                        ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
 361                        n_entries--;
 362                        paddr += HAL_LINK_DESC_SIZE;
 363                        if (rem_entries) {
 364                                rem_entries--;
 365                                scatter_buf++;
 366                                continue;
 367                        }
 368
 369                        rem_entries = n_entries_per_buf;
 370                        scatter_idx++;
 371                        scatter_buf = slist[scatter_idx].vaddr;
 372                }
 373        }
 374
 375        end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
 376                     sizeof(struct hal_wbm_link_desc);
 377        ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
 378                                        n_link_desc, end_offset);
 379
 380        return 0;
 381
 382err:
 383        ath11k_dp_scatter_idle_link_desc_cleanup(ab);
 384
 385        return ret;
 386}
 387
 388static void
 389ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
 390                              struct dp_link_desc_bank *link_desc_banks)
 391{
 392        int i;
 393
 394        for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
 395                if (link_desc_banks[i].vaddr_unaligned) {
 396                        dma_free_coherent(ab->dev,
 397                                          link_desc_banks[i].size,
 398                                          link_desc_banks[i].vaddr_unaligned,
 399                                          link_desc_banks[i].paddr_unaligned);
 400                        link_desc_banks[i].vaddr_unaligned = NULL;
 401                }
 402        }
 403}
 404
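     /*
      * Allocate the DMA memory backing the link descriptor banks.  Every
      * bank is DP_LINK_DESC_ALLOC_SIZE_THRESH bytes except possibly the
      * last one; the aligned vaddr/paddr pair used elsewhere is derived
      * from the unaligned allocation with PTR_ALIGN() against
      * HAL_LINK_DESC_ALIGN.
      */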
 405static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
 406                                          struct dp_link_desc_bank *desc_bank,
 407                                          int n_link_desc_bank,
 408                                          int last_bank_sz)
 409{
 410        struct ath11k_dp *dp = &ab->dp;
 411        int i;
 412        int ret = 0;
 413        int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
 414
 415        for (i = 0; i < n_link_desc_bank; i++) {
 416                if (i == (n_link_desc_bank - 1) && last_bank_sz)
 417                        desc_sz = last_bank_sz;
 418
 419                desc_bank[i].vaddr_unaligned =
 420                                        dma_alloc_coherent(ab->dev, desc_sz,
 421                                                           &desc_bank[i].paddr_unaligned,
 422                                                           GFP_KERNEL);
 423                if (!desc_bank[i].vaddr_unaligned) {
 424                        ret = -ENOMEM;
 425                        goto err;
 426                }
 427
 428                desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
 429                                               HAL_LINK_DESC_ALIGN);
 430                desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
 431                                     ((unsigned long)desc_bank[i].vaddr -
 432                                      (unsigned long)desc_bank[i].vaddr_unaligned);
 433                desc_bank[i].size = desc_sz;
 434        }
 435
 436        return 0;
 437
 438err:
 439        ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);
 440
 441        return ret;
 442}
 443
 444void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
 445                                 struct dp_link_desc_bank *desc_bank,
 446                                 u32 ring_type, struct dp_srng *ring)
 447{
 448        ath11k_dp_link_desc_bank_free(ab, desc_bank);
 449
 450        if (ring_type != HAL_RXDMA_MONITOR_DESC) {
 451                ath11k_dp_srng_cleanup(ab, ring);
 452                ath11k_dp_scatter_idle_link_desc_cleanup(ab);
 453        }
 454}
 455
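     /*
      * Size and create the WBM idle link ring.  The number of link
      * descriptors is estimated from worst-case MPDU/MSDU queue depths
      * (DP_NUM_TIDS_MAX, DP_AVG_MPDUS_PER_TID_MAX and friends) and, if
      * the result is not already a power of two, rounded up with
      * 1 << fls(*n_link_desc), i.e. to the next power of two above it.
      */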
 456static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
 457{
 458        struct ath11k_dp *dp = &ab->dp;
 459        u32 n_mpdu_link_desc, n_mpdu_queue_desc;
 460        u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
 461        int ret = 0;
 462
 463        n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
 464                           HAL_NUM_MPDUS_PER_LINK_DESC;
 465
 466        n_mpdu_queue_desc = n_mpdu_link_desc /
 467                            HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
 468
 469        n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
 470                               DP_AVG_MSDUS_PER_FLOW) /
 471                              HAL_NUM_TX_MSDUS_PER_LINK_DESC;
 472
 473        n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
 474                               DP_AVG_MSDUS_PER_MPDU) /
 475                              HAL_NUM_RX_MSDUS_PER_LINK_DESC;
 476
 477        *n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
 478                      n_tx_msdu_link_desc + n_rx_msdu_link_desc;
 479
 480        if (*n_link_desc & (*n_link_desc - 1))
 481                *n_link_desc = 1 << fls(*n_link_desc);
 482
 483        ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
 484                                   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
 485        if (ret) {
 486                ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
 487                return ret;
 488        }
 489        return ret;
 490}
 491
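     /*
      * Allocate the link descriptor banks and hand their descriptors to
      * the hardware.  The total memory (n_link_desc * HAL_LINK_DESC_SIZE
      * plus alignment slack) is split into banks of at most
      * DP_LINK_DESC_ALLOC_SIZE_THRESH bytes, the last bank possibly
      * being shorter.  If the resulting idle list is too large and the
      * ring is not the RXDMA monitor descriptor ring, the scatter buffer
      * path above is used; otherwise each bank's descriptors are written
      * straight into the WBM idle link SRNG under the ring lock.
      */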
 492int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
 493                              struct dp_link_desc_bank *link_desc_banks,
 494                              u32 ring_type, struct hal_srng *srng,
 495                              u32 n_link_desc)
 496{
 497        u32 tot_mem_sz;
 498        u32 n_link_desc_bank, last_bank_sz;
 499        u32 entry_sz, align_bytes, n_entries;
  500        dma_addr_t paddr;
 501        u32 *desc;
 502        int i, ret;
 503
 504        tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
 505        tot_mem_sz += HAL_LINK_DESC_ALIGN;
 506
 507        if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
 508                n_link_desc_bank = 1;
 509                last_bank_sz = tot_mem_sz;
 510        } else {
 511                n_link_desc_bank = tot_mem_sz /
 512                                   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
 513                                    HAL_LINK_DESC_ALIGN);
 514                last_bank_sz = tot_mem_sz %
 515                               (DP_LINK_DESC_ALLOC_SIZE_THRESH -
 516                                HAL_LINK_DESC_ALIGN);
 517
 518                if (last_bank_sz)
 519                        n_link_desc_bank += 1;
 520        }
 521
 522        if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
 523                return -EINVAL;
 524
 525        ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
 526                                             n_link_desc_bank, last_bank_sz);
 527        if (ret)
 528                return ret;
 529
 530        /* Setup link desc idle list for HW internal usage */
 531        entry_sz = ath11k_hal_srng_get_entrysize(ring_type);
 532        tot_mem_sz = entry_sz * n_link_desc;
 533
  534        /* Setup scatter desc list when the total memory exceeds a single bank */
 535        if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
 536            ring_type != HAL_RXDMA_MONITOR_DESC) {
 537                ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
 538                                                             n_link_desc_bank,
 539                                                             n_link_desc,
 540                                                             last_bank_sz);
 541                if (ret) {
 542                        ath11k_warn(ab, "failed to setup scatting idle list descriptor :%d\n",
 543                                    ret);
 544                        goto fail_desc_bank_free;
 545                }
 546
 547                return 0;
 548        }
 549
 550        spin_lock_bh(&srng->lock);
 551
 552        ath11k_hal_srng_access_begin(ab, srng);
 553
 554        for (i = 0; i < n_link_desc_bank; i++) {
 555                align_bytes = link_desc_banks[i].vaddr -
 556                              link_desc_banks[i].vaddr_unaligned;
 557                n_entries = (link_desc_banks[i].size - align_bytes) /
 558                            HAL_LINK_DESC_SIZE;
 559                paddr = link_desc_banks[i].paddr;
 560                while (n_entries &&
 561                       (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
 562                        ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
 563                                                      i, paddr);
 564                        n_entries--;
 565                        paddr += HAL_LINK_DESC_SIZE;
 566                }
 567        }
 568
 569        ath11k_hal_srng_access_end(ab, srng);
 570
 571        spin_unlock_bh(&srng->lock);
 572
 573        return 0;
 574
 575fail_desc_bank_free:
 576        ath11k_dp_link_desc_bank_free(ab, link_desc_banks);
 577
 578        return ret;
 579}
 580
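     /*
      * Service the rings mapped to one ext interrupt group, intended to
      * be called from that group's NAPI poll with the remaining budget.
      * Rx-style handlers return the number of frames processed, which is
      * subtracted from the budget, and the function bails out early once
      * the budget is exhausted; tx completions, REO status processing
      * and rx buffer replenishing are not budgeted.
      */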
 581int ath11k_dp_service_srng(struct ath11k_base *ab,
 582                           struct ath11k_ext_irq_grp *irq_grp,
 583                           int budget)
 584{
 585        struct napi_struct *napi = &irq_grp->napi;
 586        int grp_id = irq_grp->grp_id;
 587        int work_done = 0;
 588        int i = 0;
 589        int tot_work_done = 0;
 590
 591        while (ath11k_tx_ring_mask[grp_id] >> i) {
 592                if (ath11k_tx_ring_mask[grp_id] & BIT(i))
 593                        ath11k_dp_tx_completion_handler(ab, i);
 594                i++;
 595        }
 596
 597        if (ath11k_rx_err_ring_mask[grp_id]) {
 598                work_done = ath11k_dp_process_rx_err(ab, napi, budget);
 599                budget -= work_done;
 600                tot_work_done += work_done;
 601                if (budget <= 0)
 602                        goto done;
 603        }
 604
 605        if (ath11k_rx_wbm_rel_ring_mask[grp_id]) {
 606                work_done = ath11k_dp_rx_process_wbm_err(ab,
 607                                                         napi,
 608                                                         budget);
 609                budget -= work_done;
 610                tot_work_done += work_done;
 611
 612                if (budget <= 0)
 613                        goto done;
 614        }
 615
 616        if (ath11k_rx_ring_mask[grp_id]) {
  617                for (i = 0; i < ab->num_radios; i++) {
 618                        if (ath11k_rx_ring_mask[grp_id] & BIT(i)) {
 619                                work_done = ath11k_dp_process_rx(ab, i, napi,
 620                                                                 &irq_grp->pending_q,
 621                                                                 budget);
 622                                budget -= work_done;
 623                                tot_work_done += work_done;
 624                        }
 625                        if (budget <= 0)
 626                                goto done;
 627                }
 628        }
 629
 630        if (rx_mon_status_ring_mask[grp_id]) {
  631                for (i = 0; i < ab->num_radios; i++) {
 632                        if (rx_mon_status_ring_mask[grp_id] & BIT(i)) {
 633                                work_done =
 634                                ath11k_dp_rx_process_mon_rings(ab,
 635                                                               i, napi,
 636                                                               budget);
 637                                budget -= work_done;
 638                                tot_work_done += work_done;
 639                        }
 640                        if (budget <= 0)
 641                                goto done;
 642                }
 643        }
 644
 645        if (ath11k_reo_status_ring_mask[grp_id])
 646                ath11k_dp_process_reo_status(ab);
 647
 648        for (i = 0; i < ab->num_radios; i++) {
 649                if (ath11k_rxdma2host_ring_mask[grp_id] & BIT(i)) {
 650                        work_done = ath11k_dp_process_rxdma_err(ab, i, budget);
 651                        budget -= work_done;
 652                        tot_work_done += work_done;
 653                }
 654
 655                if (budget <= 0)
 656                        goto done;
 657
 658                if (ath11k_host2rxdma_ring_mask[grp_id] & BIT(i)) {
 659                        struct ath11k_pdev_dp *dp = &ab->pdevs[i].ar->dp;
 660                        struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
 661
 662                        ath11k_dp_rxbufs_replenish(ab, i, rx_ring, 0,
 663                                                   HAL_RX_BUF_RBM_SW3_BM,
 664                                                   GFP_ATOMIC);
 665                }
 666        }
 667        /* TODO: Implement handler for other interrupts */
 668
 669done:
 670        return tot_work_done;
 671}
 672
 673void ath11k_dp_pdev_free(struct ath11k_base *ab)
 674{
 675        struct ath11k *ar;
 676        int i;
 677
 678        for (i = 0; i < ab->num_radios; i++) {
 679                ar = ab->pdevs[i].ar;
 680                ath11k_dp_rx_pdev_free(ab, i);
 681                ath11k_debug_unregister(ar);
 682                ath11k_dp_rx_pdev_mon_detach(ar);
 683        }
 684}
 685
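     /*
      * Early per-pdev data path init, done before any rings are
      * allocated in ath11k_dp_pdev_alloc(): record the mac_id, set up
      * the idr/lock pairs for the rx refill, rx monitor status refill
      * and rxdma monitor buffer rings, and initialize the pending-tx
      * counter and its wait queue.
      */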
 686void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
 687{
 688        struct ath11k *ar;
 689        struct ath11k_pdev_dp *dp;
 690        int i;
 691
  692        for (i = 0; i < ab->num_radios; i++) {
 693                ar = ab->pdevs[i].ar;
 694                dp = &ar->dp;
 695                dp->mac_id = i;
 696                idr_init(&dp->rx_refill_buf_ring.bufs_idr);
 697                spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
 698                atomic_set(&dp->num_tx_pending, 0);
 699                init_waitqueue_head(&dp->tx_empty_waitq);
 700                idr_init(&dp->rx_mon_status_refill_ring.bufs_idr);
 701                spin_lock_init(&dp->rx_mon_status_refill_ring.idr_lock);
 702                idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
 703                spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
 704        }
 705}
 706
 707int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
 708{
 709        struct ath11k *ar;
 710        int ret;
 711        int i;
 712
  713        /* TODO: Per-pdev rx ring, unlike the tx ring which is mapped to different ACs */
 714        for (i = 0; i < ab->num_radios; i++) {
 715                ar = ab->pdevs[i].ar;
 716                ret = ath11k_dp_rx_pdev_alloc(ab, i);
 717                if (ret) {
 718                        ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
 719                                    i);
 720                        goto err;
 721                }
 722                ret = ath11k_dp_rx_pdev_mon_attach(ar);
 723                if (ret) {
 724                        ath11k_warn(ab, "failed to initialize mon pdev %d\n",
 725                                    i);
 726                        goto err;
 727                }
 728        }
 729
 730        return 0;
 731
 732err:
 733        ath11k_dp_pdev_free(ab);
 734
 735        return ret;
 736}
 737
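     /*
      * Connect the HTT data message service over HTC and remember the
      * endpoint id in dp->eid.  Tx completions of HTT control buffers
      * simply free the skb (ath11k_dp_htt_htc_tx_complete() above).
      */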
 738int ath11k_dp_htt_connect(struct ath11k_dp *dp)
 739{
 740        struct ath11k_htc_svc_conn_req conn_req;
 741        struct ath11k_htc_svc_conn_resp conn_resp;
 742        int status;
 743
 744        memset(&conn_req, 0, sizeof(conn_req));
 745        memset(&conn_resp, 0, sizeof(conn_resp));
 746
 747        conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
 748        conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;
 749
  750        /* connect to the HTT data service */
 751        conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;
 752
 753        status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
 754                                            &conn_resp);
 755
 756        if (status)
 757                return status;
 758
 759        dp->eid = conn_resp.eid;
 760
 761        return 0;
 762}
 763
 764static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
 765{
  766        /* For STA mode, enable address search index,
  767         * TCL uses the ast_hash value in the descriptor.
  768         */
 769        switch (arvif->vdev_type) {
 770        case WMI_VDEV_TYPE_STA:
 771                arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
 772                arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
 773                break;
 774        case WMI_VDEV_TYPE_AP:
 775        case WMI_VDEV_TYPE_IBSS:
 776                arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
 777                arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
 778                break;
 779        case WMI_VDEV_TYPE_MONITOR:
 780        default:
 781                return;
 782        }
 783}
 784
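     /*
      * Precompute the TCL metadata used for every tx descriptor of this
      * vif: metadata type 1, the vdev id and the pdev id are packed with
      * FIELD_PREP(), the "valid HTT extension" bit is cleared by
      * default, and the address search type/flags are chosen from the
      * vdev type (index-based search for STA, default search for
      * AP/IBSS, nothing for monitor vdevs).
      */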
 785void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
 786{
 787        arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
 788                               FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
 789                                          arvif->vdev_id) |
 790                               FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
 791                                          ar->pdev->pdev_id);
 792
 793        /* set HTT extension valid bit to 0 by default */
 794        arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
 795
 796        ath11k_dp_update_vdev_search(arvif);
 797}
 798
 799static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
 800{
 801        struct ath11k_base *ab = (struct ath11k_base *)ctx;
 802        struct sk_buff *msdu = skb;
 803
 804        dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
 805                         DMA_TO_DEVICE);
 806
 807        dev_kfree_skb_any(msdu);
 808
 809        return 0;
 810}
 811
 812void ath11k_dp_free(struct ath11k_base *ab)
 813{
 814        struct ath11k_dp *dp = &ab->dp;
 815        int i;
 816
 817        ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
 818                                    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
 819
 820        ath11k_dp_srng_common_cleanup(ab);
 821
 822        ath11k_dp_reo_cmd_list_cleanup(ab);
 823
 824        for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
 825                spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
 826                idr_for_each(&dp->tx_ring[i].txbuf_idr,
 827                             ath11k_dp_tx_pending_cleanup, ab);
 828                idr_destroy(&dp->tx_ring[i].txbuf_idr);
 829                spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
 830                kfree(dp->tx_ring[i].tx_status);
 831        }
 832
 833        /* Deinit any SOC level resource */
 834}
 835
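     /*
      * SoC-level data path allocation, roughly the reverse of
      * ath11k_dp_free(): size and set up the WBM idle link ring, allocate
      * and publish the link descriptor banks, create the common SRNGs,
      * then initialize the per-TCL-ring tx bookkeeping (tx buffer idr and
      * a tx_status buffer of DP_TX_COMP_RING_SIZE entries) and program
      * the DSCP-to-TID map tables.
      */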
 836int ath11k_dp_alloc(struct ath11k_base *ab)
 837{
 838        struct ath11k_dp *dp = &ab->dp;
 839        struct hal_srng *srng = NULL;
 840        size_t size = 0;
 841        u32 n_link_desc = 0;
 842        int ret;
 843        int i;
 844
 845        dp->ab = ab;
 846
 847        INIT_LIST_HEAD(&dp->reo_cmd_list);
 848        INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
 849        spin_lock_init(&dp->reo_cmd_lock);
 850
 851        ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
 852        if (ret) {
 853                ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
 854                return ret;
 855        }
 856
 857        srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
 858
 859        ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
 860                                        HAL_WBM_IDLE_LINK, srng, n_link_desc);
 861        if (ret) {
 862                ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
 863                return ret;
 864        }
 865
 866        ret = ath11k_dp_srng_common_setup(ab);
 867        if (ret)
 868                goto fail_link_desc_cleanup;
 869
 870        size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;
 871
 872        for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
 873                idr_init(&dp->tx_ring[i].txbuf_idr);
 874                spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
 875                dp->tx_ring[i].tcl_data_ring_id = i;
 876
 877                dp->tx_ring[i].tx_status_head = 0;
 878                dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
 879                dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
  880                if (!dp->tx_ring[i].tx_status) {
                             ret = -ENOMEM;
  881                        goto fail_cmn_srng_cleanup;
                     }
 882        }
 883
 884        for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
 885                ath11k_hal_tx_set_dscp_tid_map(ab, i);
 886
 887        /* Init any SOC level resource for DP */
 888
 889        return 0;
 890
 891fail_cmn_srng_cleanup:
 892        ath11k_dp_srng_common_cleanup(ab);
 893
 894fail_link_desc_cleanup:
 895        ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
 896                                    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
 897
 898        return ret;
 899}
 900