/* linux/drivers/net/wireless/ath/ath11k/ce.c */
   1// SPDX-License-Identifier: BSD-3-Clause-Clear
   2/*
   3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
   4 */
   5
   6#include "dp_rx.h"
   7#include "debug.h"
   8
/* Host-side Copy Engine (CE) pipe configuration, indexed by CE id.
 *
 * Per pipe:
 *  - src_nentries / dest_nentries: ring depths for the host->target and
 *    target->host directions; 0 means the host does not use that direction.
 *  - src_sz_max: maximum buffer size; also used as the RX allocation size
 *    for dest pipes (copied into pipe->buf_sz in ath11k_ce_alloc_pipes()).
 *  - recv_cb: handler invoked for completed RX buffers on dest pipes.
 *  - CE_ATTR_DIS_INTR: pipe is serviced by polling instead of interrupts
 *    (see ath11k_ce_poll_send_completed()).
 *
 * NOTE(review): this table presumably has to match the target/firmware CE
 * configuration entry-for-entry — confirm against the firmware interface
 * before changing ring sizes or flags.
 */
static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE9: host->target WMI (mac2) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE11: Not used */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};
 110
/* Post one RX buffer on @pipe's destination ring.
 *
 * Writes @paddr into the next free descriptor of the HAL ring backing the
 * dest ring and records @skb at the current write index so the completion
 * path (ath11k_ce_completed_recv_next()) can find it again.
 *
 * Context: caller must hold ab->ce.ce_lock (asserted below), which
 * protects the software ring indices; the HAL srng lock is taken here
 * around the descriptor update.
 *
 * Return: 0 on success, -ENOSPC when no free ring entry is available.
 */
static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
					 struct sk_buff *skb, dma_addr_t paddr)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct hal_srng *srng;
	unsigned int write_index;
	unsigned int nentries_mask = ring->nentries_mask;
	u32 *desc;
	int ret;

	lockdep_assert_held(&ab->ce.ce_lock);

	write_index = ring->write_index;

	srng = &ab->hal.srng_list[ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	/* Check for space before claiming a descriptor */
	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ret = -ENOSPC;
		goto exit;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOSPC;
		goto exit;
	}

	ath11k_hal_ce_dst_set_desc(desc, paddr);

	/* Remember the skb so the completion handler can unmap/free it */
	ring->skb[write_index] = skb;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ring->write_index = write_index;

	pipe->rx_buf_needed--;

	ret = 0;
exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}
 159
/* Replenish RX buffers on @pipe until pipe->rx_buf_needed is drained.
 *
 * For each needed buffer: allocate an skb of pipe->buf_sz, DMA-map it for
 * device writes, stash the DMA address in the skb control block and
 * enqueue it on the destination ring. Stops on the first failure, undoing
 * the mapping/allocation of the failed buffer only.
 *
 * Pipes without a dest/status ring (TX-only or unused CEs) are a no-op.
 *
 * Return: 0 on success or when nothing is needed; -ENOMEM on allocation
 * failure, -EIO on DMA mapping failure, or the enqueue error (-ENOSPC).
 * Callers may retry later via the rx_replenish_retry timer.
 */
static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0;

	if (!(pipe->dest_ring || pipe->status_ring))
		return 0;

	spin_lock_bh(&ab->ce.ce_lock);
	while (pipe->rx_buf_needed) {
		skb = dev_alloc_skb(pipe->buf_sz);
		if (!skb) {
			ret = -ENOMEM;
			goto exit;
		}

		/* CE descriptors presumably require 4-byte aligned buffers —
		 * dev_alloc_skb() should guarantee this; warn if not.
		 */
		WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(ab->dev, paddr))) {
			ath11k_warn(ab, "failed to dma map ce rx buf\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto exit;
		}

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);

		if (ret) {
			ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
			dma_unmap_single(ab->dev, paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			goto exit;
		}
	}

exit:
	spin_unlock_bh(&ab->ce.ce_lock);
	return ret;
}
 208
/* Pop the next completed RX buffer from @pipe.
 *
 * Reads one completion from the CE status ring; on success returns the
 * skb previously posted at the dest ring's current sw_index through @skb
 * and the received length through @nbytes, advances sw_index, and bumps
 * pipe->rx_buf_needed so the slot gets replenished.
 *
 * Takes ab->ce.ce_lock (ring indices) and the status srng lock.
 *
 * Return: 0 on success; -EIO when no completion is pending or the
 * reported length is zero.
 */
static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
					 struct sk_buff **skb, int *nbytes)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	u32 *desc;
	int ret = 0;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->dest_ring->sw_index;
	nentries_mask = pipe->dest_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
	if (!desc) {
		ret = -EIO;
		goto err;
	}

	*nbytes = ath11k_hal_ce_dst_status_get_length(desc);
	if (*nbytes == 0) {
		/* Zero-length completion: treat as no data */
		ret = -EIO;
		goto err;
	}

	/* Dest and status rings advance in lockstep: the completion at the
	 * head of the status ring corresponds to dest ring slot sw_index.
	 */
	*skb = pipe->dest_ring->skb[sw_index];
	pipe->dest_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->dest_ring->sw_index = sw_index;

	pipe->rx_buf_needed++;
err:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}
 258
/* Drain completed RX buffers on @pipe and hand them to pipe->recv_cb.
 *
 * Completed skbs are unmapped, length-set via skb_put() and collected on
 * a local queue first, so recv_cb runs without the CE/srng locks held.
 * Completions whose reported length exceeds the buffer are dropped with
 * a warning. Consumed buffers are then replenished; if reposting fails
 * for any reason other than -ENOSPC, the retry timer is armed.
 */
static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	struct sk_buff_head list;
	unsigned int nbytes, max_nbytes;
	int ret;

	__skb_queue_head_init(&list);
	while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			/* Device claims more data than the buffer holds */
			ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath11k_dbg(ab, ATH11K_DBG_AHB, "rx ce pipe %d len %d\n",
			   pipe->pipe_num, skb->len);
		pipe->recv_cb(ab, skb);
	}

	ret = ath11k_ce_rx_post_pipe(pipe);
	if (ret && ret != -ENOSPC) {
		ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
			    pipe->pipe_num, ret);
		mod_timer(&ab->rx_replenish_retry,
			  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
	}
}
 298
/* Reap the next completed TX descriptor from @pipe's source ring.
 *
 * Advances the source ring sw_index and returns the skb that was recorded
 * there by ath11k_ce_send() — which may be NULL for descriptors posted
 * without an skb.
 *
 * NOTE(review): ath11k_hal_srng_access_begin() is called with no matching
 * access_end on either path — presumably intentional since reaping only
 * consumes the cached pointers without releasing entries to hardware;
 * confirm against the HAL srng access contract.
 *
 * Return: the reaped skb (possibly NULL), or ERR_PTR(-EIO) when nothing
 * is left to reap.
 */
static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	struct sk_buff *skb;
	u32 *desc;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->src_ring->sw_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_reap_next(ab, srng);
	if (!desc) {
		skb = ERR_PTR(-EIO);
		goto err_unlock;
	}

	skb = pipe->src_ring->skb[sw_index];

	pipe->src_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->src_ring->sw_index = sw_index;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return skb;
}
 339
 340static void ath11k_ce_send_done_cb(struct ath11k_ce_pipe *pipe)
 341{
 342        struct ath11k_base *ab = pipe->ab;
 343        struct sk_buff *skb;
 344
 345        while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
 346                if (!skb)
 347                        continue;
 348
 349                dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
 350                                 DMA_TO_DEVICE);
 351                dev_kfree_skb_any(skb);
 352        }
 353}
 354
/* Set up the HAL srng backing @ce_ring for CE @ce_id.
 *
 * Fills hal_srng_params from the ring's DMA addresses/size and the CE's
 * interrupt policy in host_ce_config_wlan, then registers the ring with
 * the HAL:
 *  - HAL_CE_SRC: per-entry batch interrupt unless CE_ATTR_DIS_INTR.
 *  - HAL_CE_DST: buffer size limit plus a low-threshold interrupt
 *    (nentries - 3) and a timer threshold, unless interrupts disabled.
 *  - HAL_CE_DST_STATUS: batch + timer interrupt thresholds, unless
 *    interrupts disabled.
 *
 * On success stores the HAL ring id in ce_ring->hal_ring_id.
 *
 * Return: 0 on success, -EINVAL for an unknown ring @type, or the
 * ath11k_hal_srng_setup() error.
 */
static int ath11k_ce_init_ring(struct ath11k_base *ab,
			       struct ath11k_ce_ring *ce_ring,
			       int ce_id, enum hal_ring_type type)
{
	struct hal_srng_params params = { 0 };
	int ret;

	params.ring_base_paddr = ce_ring->base_addr_ce_space;
	params.ring_base_vaddr = ce_ring->base_addr_owner_space;
	params.num_entries = ce_ring->nentries;

	switch (type) {
	case HAL_CE_SRC:
		if (!(CE_ATTR_DIS_INTR & host_ce_config_wlan[ce_id].flags))
			params.intr_batch_cntr_thres_entries = 1;
		break;
	case HAL_CE_DST:
		params.max_buffer_len = host_ce_config_wlan[ce_id].src_sz_max;
		if (!(host_ce_config_wlan[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_timer_thres_us = 1024;
			params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
			params.low_threshold = ce_ring->nentries - 3;
		}
		break;
	case HAL_CE_DST_STATUS:
		if (!(host_ce_config_wlan[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_batch_cntr_thres_entries = 1;
			params.intr_timer_thres_us = 0x1000;
		}
		break;
	default:
		ath11k_warn(ab, "Invalid CE ring type %d\n", type);
		return -EINVAL;
	}

	/* TODO: Init other params needed by HAL to init the ring */

	ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ce_id);
		return ret;
	}
	ce_ring->hal_ring_id = ret;

	return 0;
}
 402
 403static struct ath11k_ce_ring *
 404ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
 405{
 406        struct ath11k_ce_ring *ce_ring;
 407        dma_addr_t base_addr;
 408
 409        ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
 410        if (ce_ring == NULL)
 411                return ERR_PTR(-ENOMEM);
 412
 413        ce_ring->nentries = nentries;
 414        ce_ring->nentries_mask = nentries - 1;
 415
 416        /* Legacy platforms that do not support cache
 417         * coherent DMA are unsupported
 418         */
 419        ce_ring->base_addr_owner_space_unaligned =
 420                dma_alloc_coherent(ab->dev,
 421                                   nentries * desc_sz + CE_DESC_RING_ALIGN,
 422                                   &base_addr, GFP_KERNEL);
 423        if (!ce_ring->base_addr_owner_space_unaligned) {
 424                kfree(ce_ring);
 425                return ERR_PTR(-ENOMEM);
 426        }
 427
 428        ce_ring->base_addr_ce_space_unaligned = base_addr;
 429
 430        ce_ring->base_addr_owner_space = PTR_ALIGN(
 431                        ce_ring->base_addr_owner_space_unaligned,
 432                        CE_DESC_RING_ALIGN);
 433        ce_ring->base_addr_ce_space = ALIGN(
 434                        ce_ring->base_addr_ce_space_unaligned,
 435                        CE_DESC_RING_ALIGN);
 436
 437        return ce_ring;
 438}
 439
/* Allocate the rings for CE @ce_id per host_ce_config_wlan.
 *
 * A source ring (TX direction, with ath11k_ce_send_done_cb as the
 * completion reaper) is created when the config has src_nentries; a
 * destination ring plus its status ring (RX direction) are created
 * together when dest_nentries is set. Ring sizes are rounded up to a
 * power of two so nentries_mask index wrapping works.
 *
 * On failure, rings allocated so far are left attached to the pipe; the
 * caller (ath11k_ce_alloc_pipes()) releases them via
 * ath11k_ce_free_pipes().
 *
 * Return: 0 on success or a ring allocation error.
 */
static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &host_ce_config_wlan[ce_id];
	struct ath11k_ce_ring *ring;
	int nentries;
	int desc_sz;

	pipe->attr_flags = attr->flags;

	if (attr->src_nentries) {
		pipe->send_cb = ath11k_ce_send_done_cb;
		nentries = roundup_pow_of_two(attr->src_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->src_ring = ring;
	}

	if (attr->dest_nentries) {
		pipe->recv_cb = attr->recv_cb;
		nentries = roundup_pow_of_two(attr->dest_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->dest_ring = ring;

		/* Status ring mirrors the dest ring entry count */
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->status_ring = ring;
	}

	return 0;
}
 478
/* Service both directions of CE @ce_id: reap TX completions (send_cb is
 * set only for pipes with a source ring) and process received buffers
 * (recv_cb is set only for pipes with a dest ring).
 */
void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];

	if (pipe->send_cb)
		pipe->send_cb(pipe);

	if (pipe->recv_cb)
		ath11k_ce_recv_process_cb(pipe);
}
 489
/* Reap TX completions on @pipe_id, but only for pipes that run in polled
 * mode (CE_ATTR_DIS_INTR) and actually have a send completion handler.
 */
void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];

	if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
		pipe->send_cb(pipe);
}
 497
/* Queue @skb for transmission on CE @pipe_id.
 *
 * Writes a source descriptor (DMA address from the skb control block,
 * length, @transfer_id, optional byte swap) into the pipe's source ring
 * and records the skb at the write index for later reaping by
 * ath11k_ce_send_done_cb().
 *
 * For polled pipes (CE_ATTR_DIS_INTR) it first reaps completions when
 * ring usage exceeds ATH11K_CE_USAGE_THRESHOLD, since no interrupt will
 * free entries otherwise.
 *
 * The caller must have already DMA-mapped skb->data and stored the
 * address in ATH11K_SKB_CB(skb)->paddr.
 *
 * Return: 0 on success, -ESHUTDOWN during crash flush, -ENOBUFS when the
 * ring is full.
 */
int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
		   u16 transfer_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
	struct hal_srng *srng;
	u32 *desc;
	unsigned int write_index, sw_index;
	unsigned int nentries_mask;
	int ret = 0;
	u8 byte_swap_data = 0;
	int num_used;

	/* Check if some entries could be regained by handling tx completion if
	 * the CE has interrupts disabled and the used entries is more than the
	 * defined usage threshold.
	 */
	if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
		spin_lock_bh(&ab->ce.ce_lock);
		write_index = pipe->src_ring->write_index;

		sw_index = pipe->src_ring->sw_index;

		/* Used entry count, accounting for index wrap-around */
		if (write_index >= sw_index)
			num_used = write_index - sw_index;
		else
			num_used = pipe->src_ring->nentries - sw_index +
				   write_index;

		spin_unlock_bh(&ab->ce.ce_lock);

		if (num_used > ATH11K_CE_USAGE_THRESHOLD)
			ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
	}

	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		return -ESHUTDOWN;

	spin_lock_bh(&ab->ce.ce_lock);

	write_index = pipe->src_ring->write_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
	if (!desc) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
		byte_swap_data = 1;

	ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
				   skb->len, transfer_id, byte_swap_data);

	/* Keep the skb so the completion path can unmap and free it */
	pipe->src_ring->skb[write_index] = skb;
	pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return 0;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}
 584
 585static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
 586{
 587        struct ath11k_base *ab = pipe->ab;
 588        struct ath11k_ce_ring *ring = pipe->dest_ring;
 589        struct sk_buff *skb;
 590        int i;
 591
 592        if (!(ring && pipe->buf_sz))
 593                return;
 594
 595        for (i = 0; i < ring->nentries; i++) {
 596                skb = ring->skb[i];
 597                if (!skb)
 598                        continue;
 599
 600                ring->skb[i] = NULL;
 601                dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
 602                                 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
 603                dev_kfree_skb_any(skb);
 604        }
 605}
 606
 607void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
 608{
 609        struct ath11k_ce_pipe *pipe;
 610        int pipe_num;
 611
 612        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
 613                pipe = &ab->ce.ce_pipe[pipe_num];
 614                ath11k_ce_rx_pipe_cleanup(pipe);
 615
 616                /* Cleanup any src CE's which have interrupts disabled */
 617                ath11k_ce_poll_send_completed(ab, pipe_num);
 618
 619                /* NOTE: Should we also clean up tx buffer in all pipes? */
 620        }
 621}
 622
 623void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
 624{
 625        struct ath11k_ce_pipe *pipe;
 626        int i;
 627        int ret;
 628
 629        for (i = 0; i < CE_COUNT; i++) {
 630                pipe = &ab->ce.ce_pipe[i];
 631                ret = ath11k_ce_rx_post_pipe(pipe);
 632                if (ret) {
 633                        if (ret == -ENOSPC)
 634                                continue;
 635
 636                        ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
 637                                    i, ret);
 638                        mod_timer(&ab->rx_replenish_retry,
 639                                  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
 640
 641                        return;
 642                }
 643        }
 644}
 645
/* Timer callback for ab->rx_replenish_retry: retry posting RX buffers
 * after an earlier ath11k_ce_rx_post_pipe() failure.
 */
void ath11k_ce_rx_replenish_retry(struct timer_list *t)
{
	struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry);

	ath11k_ce_rx_post_buf(ab);
}
 652
 653int ath11k_ce_init_pipes(struct ath11k_base *ab)
 654{
 655        struct ath11k_ce_pipe *pipe;
 656        int i;
 657        int ret;
 658
 659        for (i = 0; i < CE_COUNT; i++) {
 660                pipe = &ab->ce.ce_pipe[i];
 661
 662                if (pipe->src_ring) {
 663                        ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
 664                                                  HAL_CE_SRC);
 665                        if (ret) {
 666                                ath11k_warn(ab, "failed to init src ring: %d\n",
 667                                            ret);
 668                                /* Should we clear any partial init */
 669                                return ret;
 670                        }
 671
 672                        pipe->src_ring->write_index = 0;
 673                        pipe->src_ring->sw_index = 0;
 674                }
 675
 676                if (pipe->dest_ring) {
 677                        ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
 678                                                  HAL_CE_DST);
 679                        if (ret) {
 680                                ath11k_warn(ab, "failed to init dest ring: %d\n",
 681                                            ret);
 682                                /* Should we clear any partial init */
 683                                return ret;
 684                        }
 685
 686                        pipe->rx_buf_needed = pipe->dest_ring->nentries ?
 687                                              pipe->dest_ring->nentries - 2 : 0;
 688
 689                        pipe->dest_ring->write_index = 0;
 690                        pipe->dest_ring->sw_index = 0;
 691                }
 692
 693                if (pipe->status_ring) {
 694                        ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
 695                                                  HAL_CE_DST_STATUS);
 696                        if (ret) {
 697                                ath11k_warn(ab, "failed to init dest status ing: %d\n",
 698                                            ret);
 699                                /* Should we clear any partial init */
 700                                return ret;
 701                        }
 702
 703                        pipe->status_ring->write_index = 0;
 704                        pipe->status_ring->sw_index = 0;
 705                }
 706        }
 707
 708        return 0;
 709}
 710
 711void ath11k_ce_free_pipes(struct ath11k_base *ab)
 712{
 713        struct ath11k_ce_pipe *pipe;
 714        int desc_sz;
 715        int i;
 716
 717        for (i = 0; i < CE_COUNT; i++) {
 718                pipe = &ab->ce.ce_pipe[i];
 719
 720                if (pipe->src_ring) {
 721                        desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
 722                        dma_free_coherent(ab->dev,
 723                                          pipe->src_ring->nentries * desc_sz +
 724                                          CE_DESC_RING_ALIGN,
 725                                          pipe->src_ring->base_addr_owner_space,
 726                                          pipe->src_ring->base_addr_ce_space);
 727                        kfree(pipe->src_ring);
 728                        pipe->src_ring = NULL;
 729                }
 730
 731                if (pipe->dest_ring) {
 732                        desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
 733                        dma_free_coherent(ab->dev,
 734                                          pipe->dest_ring->nentries * desc_sz +
 735                                          CE_DESC_RING_ALIGN,
 736                                          pipe->dest_ring->base_addr_owner_space,
 737                                          pipe->dest_ring->base_addr_ce_space);
 738                        kfree(pipe->dest_ring);
 739                        pipe->dest_ring = NULL;
 740                }
 741
 742                if (pipe->status_ring) {
 743                        desc_sz =
 744                          ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
 745                        dma_free_coherent(ab->dev,
 746                                          pipe->status_ring->nentries * desc_sz +
 747                                          CE_DESC_RING_ALIGN,
 748                                          pipe->status_ring->base_addr_owner_space,
 749                                          pipe->status_ring->base_addr_ce_space);
 750                        kfree(pipe->status_ring);
 751                        pipe->status_ring = NULL;
 752                }
 753        }
 754}
 755
/* Allocate all CE pipes and their rings per host_ce_config_wlan.
 *
 * Initializes the shared ce_lock, fills in per-pipe identity fields
 * (pipe_num, back-pointer to @ab, RX buffer size from src_sz_max) and
 * allocates the rings for each pipe.
 *
 * Return: 0 on success; on any allocation failure every pipe allocated
 * so far is released and the error is returned.
 */
int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;
	const struct ce_attr *attr;

	spin_lock_init(&ab->ce.ce_lock);

	for (i = 0; i < CE_COUNT; i++) {
		attr = &host_ce_config_wlan[i];
		pipe = &ab->ce.ce_pipe[i];
		pipe->pipe_num = i;
		pipe->ab = ab;
		pipe->buf_sz = attr->src_sz_max;

		ret = ath11k_ce_alloc_pipe(ab, i);
		if (ret) {
			/* Free any partial successful allocation */
			ath11k_ce_free_pipes(ab);
			return ret;
		}
	}

	return 0;
}
 782
 783/* For Big Endian Host, Copy Engine byte_swap is enabled
 784 * When Copy Engine does byte_swap, need to byte swap again for the
 785 * Host to get/put buffer content in the correct byte order
 786 */
 787void ath11k_ce_byte_swap(void *mem, u32 len)
 788{
 789        int i;
 790
 791        if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
 792                if (!mem)
 793                        return;
 794
 795                for (i = 0; i < (len / 4); i++) {
 796                        *(u32 *)mem = swab32(*(u32 *)mem);
 797                        mem += 4;
 798                }
 799        }
 800}
 801
 802int ath11k_ce_get_attr_flags(int ce_id)
 803{
 804        if (ce_id >= CE_COUNT)
 805                return -EINVAL;
 806
 807        return host_ce_config_wlan[ce_id].flags;
 808}
 809