linux/drivers/dma/xilinx/xilinx_ps_pcie_platform.c
   1/*
   2 * XILINX PS PCIe DMA driver
   3 *
   4 * Copyright (C) 2017 Xilinx, Inc. All rights reserved.
   5 *
   6 * Description
   7 * PS PCIe DMA is memory mapped DMA used to execute PS to PL transfers
   8 * on ZynqMP UltraScale+ Devices
   9 *
  10 * This program is free software: you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License version 2 as
  12 * published by the Free Software Foundation
  13 */
  14
  15#include "xilinx_ps_pcie.h"
  16#include "../dmaengine.h"
  17
  18#define PLATFORM_DRIVER_NAME              "ps_pcie_pform_dma"
  19#define MAX_BARS 6
  20
  21#define DMA_BAR_NUMBER 0
  22
  23#define MIN_SW_INTR_TRANSACTIONS       2
  24
  25#define CHANNEL_PROPERTY_LENGTH 50
  26#define WORKQ_NAME_SIZE         100
  27#define INTR_HANDLR_NAME_SIZE   100
  28
  29#define PS_PCIE_DMA_IRQ_NOSHARE    0
  30
  31#define MAX_COALESCE_COUNT     255
  32
  33#define DMA_CHANNEL_REGS_SIZE 0x80
  34
  35#define DMA_SRCQPTRLO_REG_OFFSET  (0x00) /* Source Q pointer Lo */
  36#define DMA_SRCQPTRHI_REG_OFFSET  (0x04) /* Source Q pointer Hi */
  37#define DMA_SRCQSZ_REG_OFFSET     (0x08) /* Source Q size */
  38#define DMA_SRCQLMT_REG_OFFSET    (0x0C) /* Source Q limit */
  39#define DMA_DSTQPTRLO_REG_OFFSET  (0x10) /* Destination Q pointer Lo */
  40#define DMA_DSTQPTRHI_REG_OFFSET  (0x14) /* Destination Q pointer Hi */
  41#define DMA_DSTQSZ_REG_OFFSET     (0x18) /* Destination Q size */
  42#define DMA_DSTQLMT_REG_OFFSET    (0x1C) /* Destination Q limit */
  43#define DMA_SSTAQPTRLO_REG_OFFSET (0x20) /* Source Status Q pointer Lo */
  44#define DMA_SSTAQPTRHI_REG_OFFSET (0x24) /* Source Status Q pointer Hi */
  45#define DMA_SSTAQSZ_REG_OFFSET    (0x28) /* Source Status Q size */
  46#define DMA_SSTAQLMT_REG_OFFSET   (0x2C) /* Source Status Q limit */
  47#define DMA_DSTAQPTRLO_REG_OFFSET (0x30) /* Destination Status Q pointer Lo */
  48#define DMA_DSTAQPTRHI_REG_OFFSET (0x34) /* Destination Status Q pointer Hi */
  49#define DMA_DSTAQSZ_REG_OFFSET    (0x38) /* Destination Status Q size */
  50#define DMA_DSTAQLMT_REG_OFFSET   (0x3C) /* Destination Status Q limit */
  51#define DMA_SRCQNXT_REG_OFFSET    (0x40) /* Source Q next */
  52#define DMA_DSTQNXT_REG_OFFSET    (0x44) /* Destination Q next */
  53#define DMA_SSTAQNXT_REG_OFFSET   (0x48) /* Source Status Q next */
  54#define DMA_DSTAQNXT_REG_OFFSET   (0x4C) /* Destination Status Q next */
  55#define DMA_SCRATCH0_REG_OFFSET   (0x50) /* Scratch pad register 0 */
  56
  57#define DMA_PCIE_INTR_CNTRL_REG_OFFSET  (0x60) /* DMA PCIe intr control reg */
  58#define DMA_PCIE_INTR_STATUS_REG_OFFSET (0x64) /* DMA PCIe intr status reg */
  59#define DMA_AXI_INTR_CNTRL_REG_OFFSET   (0x68) /* DMA AXI intr control reg */
  60#define DMA_AXI_INTR_STATUS_REG_OFFSET  (0x6C) /* DMA AXI intr status reg */
  61#define DMA_PCIE_INTR_ASSRT_REG_OFFSET  (0x70) /* PCIe intr assert reg */
  62#define DMA_AXI_INTR_ASSRT_REG_OFFSET   (0x74) /* AXI intr assert register */
  63#define DMA_CNTRL_REG_OFFSET            (0x78) /* DMA control register */
  64#define DMA_STATUS_REG_OFFSET           (0x7C) /* DMA status register */
  65
  66#define DMA_CNTRL_RST_BIT               BIT(1)
  67#define DMA_CNTRL_64BIT_STAQ_ELEMSZ_BIT BIT(2)
  68#define DMA_CNTRL_ENABL_BIT             BIT(0)
  69#define DMA_STATUS_DMA_PRES_BIT         BIT(15)
  70#define DMA_STATUS_DMA_RUNNING_BIT      BIT(0)
  71#define DMA_QPTRLO_QLOCAXI_BIT          BIT(0)
  72#define DMA_QPTRLO_Q_ENABLE_BIT         BIT(1)
  73#define DMA_INTSTATUS_DMAERR_BIT        BIT(1)
  74#define DMA_INTSTATUS_SGLINTR_BIT       BIT(2)
  75#define DMA_INTSTATUS_SWINTR_BIT        BIT(3)
  76#define DMA_INTCNTRL_ENABLINTR_BIT      BIT(0)
  77#define DMA_INTCNTRL_DMAERRINTR_BIT     BIT(1)
  78#define DMA_INTCNTRL_DMASGINTR_BIT      BIT(2)
  79#define DMA_SW_INTR_ASSRT_BIT           BIT(3)
  80
  81#define SOURCE_CONTROL_BD_BYTE_COUNT_MASK       GENMASK(23, 0)
  82#define SOURCE_CONTROL_BD_LOC_AXI               BIT(24)
  83#define SOURCE_CONTROL_BD_EOP_BIT               BIT(25)
  84#define SOURCE_CONTROL_BD_INTR_BIT              BIT(26)
  85#define SOURCE_CONTROL_BACK_TO_BACK_PACK_BIT    BIT(25)
  86#define SOURCE_CONTROL_ATTRIBUTES_MASK          GENMASK(31, 28)
  87#define SRC_CTL_ATTRIB_BIT_SHIFT                (29)
  88
  89#define STA_BD_COMPLETED_BIT            BIT(0)
  90#define STA_BD_SOURCE_ERROR_BIT         BIT(1)
  91#define STA_BD_DESTINATION_ERROR_BIT    BIT(2)
  92#define STA_BD_INTERNAL_ERROR_BIT       BIT(3)
  93#define STA_BD_UPPER_STATUS_NONZERO_BIT BIT(31)
  94#define STA_BD_BYTE_COUNT_MASK          GENMASK(30, 4)
  95
  96#define STA_BD_BYTE_COUNT_SHIFT         4
  97
  98#define DMA_INTCNTRL_SGCOLSCCNT_BIT_SHIFT (16)
  99
 100#define DMA_SRC_Q_LOW_BIT_SHIFT   GENMASK(5, 0)
 101
 102#define MAX_TRANSFER_LENGTH       0x1000000
 103
 104#define AXI_ATTRIBUTE       0x3
 105#define PCI_ATTRIBUTE       0x2
 106
 107#define ROOTDMA_Q_READ_ATTRIBUTE 0x8
 108
 109/*
 110 * User Id programmed into Source Q will be copied into Status Q of Destination
 111 */
 112#define DEFAULT_UID 1
 113
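/*
 * Editorial sketch (not part of the original driver): how the DMA_INTCNTRL_*
 * bits and DMA_INTCNTRL_SGCOLSCCNT_BIT_SHIFT above combine into one value for
 * the per-channel interrupt control register.  The driver's own programming
 * of this register is outside this excerpt; "coalesce" is a placeholder.
 *
 *	u32 intr_ctrl = DMA_INTCNTRL_ENABLINTR_BIT |
 *			DMA_INTCNTRL_DMAERRINTR_BIT |
 *			DMA_INTCNTRL_DMASGINTR_BIT |
 *			(min(coalesce, (u32)MAX_COALESCE_COUNT) <<
 *			 DMA_INTCNTRL_SGCOLSCCNT_BIT_SHIFT);
 *
 *	ps_pcie_dma_write(chan, chan->intr_control_offset, intr_ctrl);
 */
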
 114/*
 115 * DMA channel registers
 116 */
 117struct DMA_ENGINE_REGISTERS {
 118        u32 src_q_low;          /* 0x00 */
 119        u32 src_q_high;         /* 0x04 */
 120        u32 src_q_size;         /* 0x08 */
 121        u32 src_q_limit;        /* 0x0C */
 122        u32 dst_q_low;          /* 0x10 */
 123        u32 dst_q_high;         /* 0x14 */
 124        u32 dst_q_size;         /* 0x18 */
 125        u32 dst_q_limit;        /* 0x1c */
 126        u32 stas_q_low;         /* 0x20 */
 127        u32 stas_q_high;        /* 0x24 */
 128        u32 stas_q_size;        /* 0x28 */
 129        u32 stas_q_limit;       /* 0x2C */
 130        u32 stad_q_low;         /* 0x30 */
 131        u32 stad_q_high;        /* 0x34 */
 132        u32 stad_q_size;        /* 0x38 */
 133        u32 stad_q_limit;       /* 0x3C */
 134        u32 src_q_next;         /* 0x40 */
 135        u32 dst_q_next;         /* 0x44 */
 136        u32 stas_q_next;        /* 0x48 */
 137        u32 stad_q_next;        /* 0x4C */
 138        u32 scrathc0;           /* 0x50 */
 139        u32 scrathc1;           /* 0x54 */
 140        u32 scrathc2;           /* 0x58 */
 141        u32 scrathc3;           /* 0x5C */
 142        u32 pcie_intr_cntrl;    /* 0x60 */
 143        u32 pcie_intr_status;   /* 0x64 */
 144        u32 axi_intr_cntrl;     /* 0x68 */
 145        u32 axi_intr_status;    /* 0x6C */
 146        u32 pcie_intr_assert;   /* 0x70 */
 147        u32 axi_intr_assert;    /* 0x74 */
 148        u32 dma_channel_ctrl;   /* 0x78 */
 149        u32 dma_channel_status; /* 0x7C */
 150} __attribute__((__packed__));
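
/*
 * Editorial sketch (not in the original driver): struct DMA_ENGINE_REGISTERS
 * is a register overlay and has to stay in lock-step with the
 * DMA_*_REG_OFFSET definitions above.  A compile-time check of that
 * assumption could look like:
 *
 *	static inline void dma_engine_regs_layout_check(void)
 *	{
 *		BUILD_BUG_ON(offsetof(struct DMA_ENGINE_REGISTERS, src_q_low) !=
 *			     DMA_SRCQPTRLO_REG_OFFSET);
 *		BUILD_BUG_ON(offsetof(struct DMA_ENGINE_REGISTERS,
 *				      dma_channel_status) !=
 *			     DMA_STATUS_REG_OFFSET);
 *		BUILD_BUG_ON(sizeof(struct DMA_ENGINE_REGISTERS) !=
 *			     DMA_CHANNEL_REGS_SIZE);
 *	}
 */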
 151
 152/**
 153 * struct SOURCE_DMA_DESCRIPTOR - Source Hardware Descriptor
 154 * @system_address: 64 bit buffer physical address
 155 * @control_byte_count: Byte count/buffer length and control flags
 156 * @user_handle: User handle gets copied to status q on completion
 157 * @user_id: User id gets copied to status q of destination
 158 */
 159struct SOURCE_DMA_DESCRIPTOR {
 160        u64 system_address;
 161        u32 control_byte_count;
 162        u16 user_handle;
 163        u16 user_id;
 164} __attribute__((__packed__));
 165
 166/**
 167 * struct DEST_DMA_DESCRIPTOR - Destination Hardware Descriptor
 168 * @system_address: 64 bit buffer physical address
 169 * @control_byte_count: Byte count/buffer length and control flags
 170 * @user_handle: User handle gets copied to status q on completion
 171 * @reserved: Reserved field
 172 */
 173struct DEST_DMA_DESCRIPTOR {
 174        u64 system_address;
 175        u32 control_byte_count;
 176        u16 user_handle;
 177        u16 reserved;
 178} __attribute__((__packed__));
 179
 180/**
 181 * struct STATUS_DMA_DESCRIPTOR - Status Hardware Descriptor
 182 * @status_flag_byte_count: Byte count/buffer length and status flags
 183 * @user_handle: User handle gets copied from src/dstq on completion
 184 * @user_id: User id gets copied from srcq
 185 */
 186struct STATUS_DMA_DESCRIPTOR {
 187        u32 status_flag_byte_count;
 188        u16 user_handle;
 189        u16 user_id;
 190} __attribute__((__packed__));
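
/*
 * Editorial sketch: how one source hardware descriptor is filled in.  This is
 * a condensed restatement of what xlnx_ps_pcie_update_srcq() does further
 * down for every scatterlist entry; "addr", "len" and "last" are
 * placeholders.
 *
 *	pdesc->system_address = addr;
 *	pdesc->control_byte_count = (len & SOURCE_CONTROL_BD_BYTE_COUNT_MASK) |
 *				    chan->read_attribute;
 *	if (last)
 *		pdesc->control_byte_count |= SOURCE_CONTROL_BD_EOP_BIT |
 *					     SOURCE_CONTROL_BD_INTR_BIT;
 *	pdesc->user_handle = chan->idx_ctx_srcq_head;
 *	pdesc->user_id = DEFAULT_UID;
 */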
 191
 192enum PACKET_CONTEXT_AVAILABILITY {
  193        FREE = 0,    /* Packet transfer parameter context is free */
  194        IN_USE       /* Packet transfer parameter context is in use */
 195};
 196
 197struct ps_pcie_transfer_elements {
 198        struct scatterlist *src_sgl;
 199        unsigned int srcq_num_elemets;
 200        struct scatterlist *dst_sgl;
 201        unsigned int dstq_num_elemets;
 202};
 203
 204struct  ps_pcie_tx_segment {
 205        struct list_head node;
 206        struct dma_async_tx_descriptor async_tx;
 207        struct ps_pcie_transfer_elements tx_elements;
 208};
 209
 210struct ps_pcie_intr_segment {
 211        struct list_head node;
 212        struct dma_async_tx_descriptor async_intr_tx;
 213};
 214
 215/*
 216 * The context structure stored for each DMA transaction
 217 * This structure is maintained separately for Src Q and Destination Q
 218 * @availability_status: Indicates whether packet context is available
 219 * @idx_sop: Indicates starting index of buffer descriptor for a transfer
 220 * @idx_eop: Indicates ending index of buffer descriptor for a transfer
 221 * @sgl: Indicates either src or dst sglist for the transaction
 222 */
 223struct PACKET_TRANSFER_PARAMS {
 224        enum PACKET_CONTEXT_AVAILABILITY availability_status;
 225        u16 idx_sop;
 226        u16 idx_eop;
 227        struct scatterlist *sgl;
 228        struct ps_pcie_tx_segment *seg;
 229        u32 requested_bytes;
 230};
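
/*
 * Editorial note (illustrative, not part of the original driver): packet
 * contexts form a ring of total_descriptors entries per queue.  The context
 * index is written into each hardware descriptor's user_handle and comes back
 * through the matching status queue element, which is how a completion is
 * matched to its transaction; the seg and requested_bytes fields let the
 * cleanup work invoke the right callback and report the transfer size.
 *
 *	pkt_ctx = chan->ppkt_ctx_srcq + chan->idx_ctx_srcq_head;  // claim ctx
 *	pdesc->user_handle = chan->idx_ctx_srcq_head;             // tag the BD
 *	...
 *	// completion path (see the cleanup work functions below)
 *	chan->idx_ctx_dstq_tail = psta_bd->user_handle;
 *	ppkt_ctx = chan->ppkt_ctx_dstq + chan->idx_ctx_dstq_tail;
 */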
 231
 232enum CHANNEL_STATE {
 233        CHANNEL_RESOURCE_UNALLOCATED = 0, /*  Channel resources not allocated */
 234        CHANNEL_UNAVIALBLE,               /*  Channel inactive */
 235        CHANNEL_AVAILABLE,                /*  Channel available for transfers */
 236        CHANNEL_ERROR                     /*  Channel encountered errors */
 237};
 238
 239enum BUFFER_LOCATION {
 240        BUFFER_LOC_PCI = 0,
 241        BUFFER_LOC_AXI,
 242        BUFFER_LOC_INVALID
 243};
 244
 245enum dev_channel_properties {
 246        DMA_CHANNEL_DIRECTION = 0,
 247        NUM_DESCRIPTORS,
 248        NUM_QUEUES,
 249        COALESE_COUNT,
 250        POLL_TIMER_FREQUENCY
 251};
 252
 253/*
 254 * struct ps_pcie_dma_chan - Driver specific DMA channel structure
 255 * @xdev: Driver specific device structure
 256 * @dev: The dma device
 257 * @common:  DMA common channel
 258 * @chan_base: Pointer to Channel registers
 259 * @channel_number: DMA channel number in the device
 260 * @num_queues: Number of queues per channel.
 261 *              It should be four for memory mapped case and
 262 *              two for Streaming case
 263 * @direction: Transfer direction
 264 * @state: Indicates channel state
 265 * @channel_lock: Spin lock to be used before changing channel state
 266 * @cookie_lock: Spin lock to be used before assigning cookie for a transaction
 267 * @coalesce_count: Indicates number of packet transfers before interrupts
  268 * @poll_timer_freq: Indicates frequency of polling for completed transactions
 269 * @poll_timer: Timer to poll dma buffer descriptors if coalesce count is > 0
 270 * @src_avail_descriptors: Available sgl source descriptors
 271 * @src_desc_lock: Lock for synchronizing src_avail_descriptors
 272 * @dst_avail_descriptors: Available sgl destination descriptors
 273 * @dst_desc_lock: Lock for synchronizing
 274 *              dst_avail_descriptors
 275 * @src_sgl_bd_pa: Physical address of Source SGL buffer Descriptors
 276 * @psrc_sgl_bd: Virtual address of Source SGL buffer Descriptors
 277 * @src_sgl_freeidx: Holds index of Source SGL buffer descriptor to be filled
  278 * @dst_sgl_bd_pa: Physical address of Dst SGL buffer Descriptors
  279 * @pdst_sgl_bd: Virtual address of Dst SGL buffer Descriptors
  280 * @dst_sgl_freeidx: Holds index of Destination SGL buffer descriptor
  281 *                   to be filled
 282 * @src_sta_bd_pa: Physical address of StatusQ buffer Descriptors
 283 * @psrc_sta_bd: Virtual address of Src StatusQ buffer Descriptors
 284 * @src_staprobe_idx: Holds index of Status Q to be examined for SrcQ updates
 285 * @src_sta_hw_probe_idx: Holds index of maximum limit of Status Q for hardware
 286 * @dst_sta_bd_pa: Physical address of Dst StatusQ buffer Descriptor
 287 * @pdst_sta_bd: Virtual address of Dst Status Q buffer Descriptors
 288 * @dst_staprobe_idx: Holds index of Status Q to be examined for updates
 289 * @dst_sta_hw_probe_idx: Holds index of max limit of Dst Status Q for hardware
  290 * @read_attribute: Describes the attributes of buffer in srcq
  291 * @write_attribute: Describes the attributes of buffer in dstq
  292 * @intr_status_offset: Register offset to be checked on receiving interrupt
  293 * @intr_control_offset: Register offset to be used to control interrupts
  294 * @ppkt_ctx_srcq: Virtual address of packet contexts for Src Q updates
 295 * @idx_ctx_srcq_head: Holds index of packet context to be filled for Source Q
 296 * @idx_ctx_srcq_tail: Holds index of packet context to be examined for Source Q
  297 * @ppkt_ctx_dstq: Virtual address of packet contexts for Dst Q updates
 298 * @idx_ctx_dstq_head: Holds index of packet context to be filled for Dst Q
 299 * @idx_ctx_dstq_tail: Holds index of packet context to be examined for Dst Q
 300 * @pending_list_lock: Lock to be taken before updating pending transfers list
 301 * @pending_list: List of transactions submitted to channel
 302 * @active_list_lock: Lock to be taken before transferring transactions from
 303 *                      pending list to active list which will be subsequently
 304 *                              submitted to hardware
 305 * @active_list: List of transactions that will be submitted to hardware
 306 * @pending_interrupts_lock: Lock to be taken before updating pending Intr list
 307 * @pending_interrupts_list: List of interrupt transactions submitted to channel
 308 * @active_interrupts_lock: Lock to be taken before transferring transactions
 309 *                      from pending interrupt list to active interrupt list
 310 * @active_interrupts_list: List of interrupt transactions that are active
 311 * @transactions_pool: Mem pool to allocate dma transactions quickly
 312 * @intr_transactions_pool: Mem pool to allocate interrupt transactions quickly
 313 * @sw_intrs_wrkq: Work Q which performs handling of software intrs
  314 * @handle_sw_intrs: Work function handling software interrupts
 315 * @maintenance_workq: Work Q to perform maintenance tasks during stop or error
 316 * @handle_chan_reset: Work that invokes channel reset function
 317 * @handle_chan_shutdown: Work that invokes channel shutdown function
 318 * @handle_chan_terminate: Work that invokes channel transactions termination
 319 * @chan_shutdown_complt: Completion variable which says shutdown is done
 320 * @chan_terminate_complete: Completion variable which says terminate is done
 321 * @primary_desc_cleanup: Work Q which performs work related to sgl handling
 322 * @handle_primary_desc_cleanup: Work that invokes src Q, dst Q cleanup
 323 *                              and programming
 324 * @chan_programming: Work Q which performs work related to channel programming
 325 * @handle_chan_programming: Work that invokes channel programming function
 326 * @srcq_desc_cleanup: Work Q which performs src Q descriptor cleanup
 327 * @handle_srcq_desc_cleanup: Work function handling Src Q completions
 328 * @dstq_desc_cleanup: Work Q which performs dst Q descriptor cleanup
 329 * @handle_dstq_desc_cleanup: Work function handling Dst Q completions
 330 * @srcq_work_complete: Src Q Work completion variable for primary work
 331 * @dstq_work_complete: Dst Q Work completion variable for primary work
 332 */
 333struct ps_pcie_dma_chan {
 334        struct xlnx_pcie_dma_device *xdev;
 335        struct device *dev;
 336
 337        struct dma_chan common;
 338
 339        struct DMA_ENGINE_REGISTERS *chan_base;
 340        u16 channel_number;
 341
 342        u32 num_queues;
 343        enum dma_data_direction direction;
 344        enum BUFFER_LOCATION srcq_buffer_location;
 345        enum BUFFER_LOCATION dstq_buffer_location;
 346
 347        u32 total_descriptors;
 348
 349        enum CHANNEL_STATE state;
 350        spinlock_t channel_lock; /* For changing channel state */
 351
 352        spinlock_t cookie_lock;  /* For acquiring cookie from dma framework*/
 353
 354        u32 coalesce_count;
 355        u32 poll_timer_freq;
 356
 357        struct timer_list poll_timer;
 358
 359        u32 src_avail_descriptors;
 360        spinlock_t src_desc_lock; /* For handling srcq available descriptors */
 361
 362        u32 dst_avail_descriptors;
 363        spinlock_t dst_desc_lock; /* For handling dstq available descriptors */
 364
 365        dma_addr_t src_sgl_bd_pa;
 366        struct SOURCE_DMA_DESCRIPTOR *psrc_sgl_bd;
 367        u32 src_sgl_freeidx;
 368
 369        dma_addr_t dst_sgl_bd_pa;
 370        struct DEST_DMA_DESCRIPTOR *pdst_sgl_bd;
 371        u32 dst_sgl_freeidx;
 372
 373        dma_addr_t src_sta_bd_pa;
 374        struct STATUS_DMA_DESCRIPTOR *psrc_sta_bd;
 375        u32 src_staprobe_idx;
 376        u32 src_sta_hw_probe_idx;
 377
 378        dma_addr_t dst_sta_bd_pa;
 379        struct STATUS_DMA_DESCRIPTOR *pdst_sta_bd;
 380        u32 dst_staprobe_idx;
 381        u32 dst_sta_hw_probe_idx;
 382
 383        u32 read_attribute;
 384        u32 write_attribute;
 385
 386        u32 intr_status_offset;
 387        u32 intr_control_offset;
 388
 389        struct PACKET_TRANSFER_PARAMS *ppkt_ctx_srcq;
 390        u16 idx_ctx_srcq_head;
 391        u16 idx_ctx_srcq_tail;
 392
 393        struct PACKET_TRANSFER_PARAMS *ppkt_ctx_dstq;
 394        u16 idx_ctx_dstq_head;
 395        u16 idx_ctx_dstq_tail;
 396
 397        spinlock_t  pending_list_lock; /* For handling dma pending_list */
 398        struct list_head pending_list;
 399        spinlock_t  active_list_lock; /* For handling dma active_list */
 400        struct list_head active_list;
 401
 402        spinlock_t pending_interrupts_lock; /* For dma pending interrupts list*/
 403        struct list_head pending_interrupts_list;
 404        spinlock_t active_interrupts_lock;  /* For dma active interrupts list*/
 405        struct list_head active_interrupts_list;
 406
 407        mempool_t *transactions_pool;
 408        mempool_t *intr_transactions_pool;
 409
 410        struct workqueue_struct *sw_intrs_wrkq;
 411        struct work_struct handle_sw_intrs;
 412
 413        struct workqueue_struct *maintenance_workq;
 414        struct work_struct handle_chan_reset;
 415        struct work_struct handle_chan_shutdown;
 416        struct work_struct handle_chan_terminate;
 417
 418        struct completion chan_shutdown_complt;
 419        struct completion chan_terminate_complete;
 420
 421        struct workqueue_struct *primary_desc_cleanup;
 422        struct work_struct handle_primary_desc_cleanup;
 423
 424        struct workqueue_struct *chan_programming;
 425        struct work_struct handle_chan_programming;
 426
 427        struct workqueue_struct *srcq_desc_cleanup;
 428        struct work_struct handle_srcq_desc_cleanup;
 429        struct completion srcq_work_complete;
 430
 431        struct workqueue_struct *dstq_desc_cleanup;
 432        struct work_struct handle_dstq_desc_cleanup;
 433        struct completion dstq_work_complete;
 434};
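
/*
 * Editorial sketch (not part of the driver): everything in the structure
 * above is private; a dmaengine client drives such a channel only through
 * the generic API.  "filter_fn", "sgl", "nents" and "my_callback" are
 * placeholders.
 *
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, filter_fn, NULL);
 *	if (!chan)
 *		return -ENODEV;
 *
 *	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!txd)
 *		return -ENOMEM;
 *	txd->callback = my_callback;	// invoked from the cleanup work below
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */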
 435
 436/*
 437 * struct xlnx_pcie_dma_device - Driver specific platform device structure
 438 * @is_rootdma: Indicates whether the dma instance is root port dma
 439 * @dma_buf_ext_addr: Indicates whether target system is 32 bit or 64 bit
 440 * @bar_mask: Indicates available pcie bars
 441 * @board_number: Count value of platform device
 442 * @dev: Device structure pointer for pcie device
 443 * @channels: Pointer to device DMA channels structure
 444 * @common: DMA device structure
 445 * @num_channels: Number of channels active for the device
 446 * @reg_base: Base address of first DMA channel of the device
 447 * @irq_vecs: Number of irq vectors allocated to pci device
 448 * @pci_dev: Parent pci device which created this platform device
 449 * @bar_info: PCIe bar related information
 450 * @platform_irq_vec: Platform irq vector number for root dma
 451 * @rootdma_vendor: PCI Vendor id for root dma
 452 * @rootdma_device: PCI Device id for root dma
 453 */
 454struct xlnx_pcie_dma_device {
 455        bool is_rootdma;
 456        bool dma_buf_ext_addr;
 457        u32 bar_mask;
 458        u16 board_number;
 459        struct device *dev;
 460        struct ps_pcie_dma_chan *channels;
 461        struct dma_device common;
 462        int num_channels;
 463        int irq_vecs;
 464        void __iomem *reg_base;
 465        struct pci_dev *pci_dev;
 466        struct BAR_PARAMS bar_info[MAX_BARS];
 467        int platform_irq_vec;
 468        u16 rootdma_vendor;
 469        u16 rootdma_device;
 470};
 471
 472#define to_xilinx_chan(chan) \
 473        container_of(chan, struct ps_pcie_dma_chan, common)
 474#define to_ps_pcie_dma_tx_descriptor(tx) \
 475        container_of(tx, struct ps_pcie_tx_segment, async_tx)
 476#define to_ps_pcie_dma_tx_intr_descriptor(tx) \
 477        container_of(tx, struct ps_pcie_intr_segment, async_intr_tx)
 478
  479/* Function Prototypes */
 480static u32 ps_pcie_dma_read(struct ps_pcie_dma_chan *chan, u32 reg);
 481static void ps_pcie_dma_write(struct ps_pcie_dma_chan *chan, u32 reg,
 482                              u32 value);
 483static void ps_pcie_dma_clr_mask(struct ps_pcie_dma_chan *chan, u32 reg,
 484                                 u32 mask);
 485static void ps_pcie_dma_set_mask(struct ps_pcie_dma_chan *chan, u32 reg,
 486                                 u32 mask);
 487static int irq_setup(struct xlnx_pcie_dma_device *xdev);
 488static int platform_irq_setup(struct xlnx_pcie_dma_device *xdev);
 489static int chan_intr_setup(struct xlnx_pcie_dma_device *xdev);
 490static int device_intr_setup(struct xlnx_pcie_dma_device *xdev);
 491static int irq_probe(struct xlnx_pcie_dma_device *xdev);
 492static int ps_pcie_check_intr_status(struct ps_pcie_dma_chan *chan);
 493static irqreturn_t ps_pcie_dma_dev_intr_handler(int irq, void *data);
 494static irqreturn_t ps_pcie_dma_chan_intr_handler(int irq, void *data);
 495static int init_hw_components(struct ps_pcie_dma_chan *chan);
 496static int init_sw_components(struct ps_pcie_dma_chan *chan);
 497static void update_channel_read_attribute(struct ps_pcie_dma_chan *chan);
 498static void update_channel_write_attribute(struct ps_pcie_dma_chan *chan);
 499static void ps_pcie_chan_reset(struct ps_pcie_dma_chan *chan);
 500static void poll_completed_transactions(unsigned long arg);
 501static bool check_descriptors_for_two_queues(struct ps_pcie_dma_chan *chan,
 502                                             struct ps_pcie_tx_segment *seg);
 503static bool check_descriptors_for_all_queues(struct ps_pcie_dma_chan *chan,
 504                                             struct ps_pcie_tx_segment *seg);
 505static bool check_descriptor_availability(struct ps_pcie_dma_chan *chan,
 506                                          struct ps_pcie_tx_segment *seg);
 507static void handle_error(struct ps_pcie_dma_chan *chan);
 508static void xlnx_ps_pcie_update_srcq(struct ps_pcie_dma_chan *chan,
 509                                     struct ps_pcie_tx_segment *seg);
 510static void xlnx_ps_pcie_update_dstq(struct ps_pcie_dma_chan *chan,
 511                                     struct ps_pcie_tx_segment *seg);
 512static void ps_pcie_chan_program_work(struct work_struct *work);
 513static void dst_cleanup_work(struct work_struct *work);
 514static void src_cleanup_work(struct work_struct *work);
 515static void ps_pcie_chan_primary_work(struct work_struct *work);
 516static int probe_channel_properties(struct platform_device *platform_dev,
 517                                    struct xlnx_pcie_dma_device *xdev,
 518                                    u16 channel_number);
 519static void xlnx_ps_pcie_destroy_mempool(struct ps_pcie_dma_chan *chan);
 520static void xlnx_ps_pcie_free_worker_queues(struct ps_pcie_dma_chan *chan);
 521static void xlnx_ps_pcie_free_pkt_ctxts(struct ps_pcie_dma_chan *chan);
 522static void xlnx_ps_pcie_free_descriptors(struct ps_pcie_dma_chan *chan);
 523static int xlnx_ps_pcie_channel_activate(struct ps_pcie_dma_chan *chan);
 524static void xlnx_ps_pcie_channel_quiesce(struct ps_pcie_dma_chan *chan);
 525static void ivk_cbk_for_pending(struct ps_pcie_dma_chan *chan);
 526static void xlnx_ps_pcie_reset_channel(struct ps_pcie_dma_chan *chan);
 527static void xlnx_ps_pcie_free_poll_timer(struct ps_pcie_dma_chan *chan);
 528static int xlnx_ps_pcie_alloc_poll_timer(struct ps_pcie_dma_chan *chan);
 529static void terminate_transactions_work(struct work_struct *work);
 530static void chan_shutdown_work(struct work_struct *work);
 531static void chan_reset_work(struct work_struct *work);
 532static int xlnx_ps_pcie_alloc_worker_threads(struct ps_pcie_dma_chan *chan);
 533static int xlnx_ps_pcie_alloc_mempool(struct ps_pcie_dma_chan *chan);
 534static int xlnx_ps_pcie_alloc_pkt_contexts(struct ps_pcie_dma_chan *chan);
 535static int dma_alloc_descriptors_two_queues(struct ps_pcie_dma_chan *chan);
 536static int dma_alloc_decriptors_all_queues(struct ps_pcie_dma_chan *chan);
 537static void xlnx_ps_pcie_dma_free_chan_resources(struct dma_chan *dchan);
 538static int xlnx_ps_pcie_dma_alloc_chan_resources(struct dma_chan *dchan);
 539static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx);
 540static dma_cookie_t xilinx_intr_tx_submit(struct dma_async_tx_descriptor *tx);
 541static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_dma_sg(
 542                struct dma_chan *channel, struct scatterlist *dst_sg,
 543                unsigned int dst_nents, struct scatterlist *src_sg,
 544                unsigned int src_nents, unsigned long flags);
 545static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_slave_sg(
 546                struct dma_chan *channel, struct scatterlist *sgl,
 547                unsigned int sg_len, enum dma_transfer_direction direction,
 548                unsigned long flags, void *context);
 549static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_interrupt(
 550                struct dma_chan *channel, unsigned long flags);
 551static void xlnx_ps_pcie_dma_issue_pending(struct dma_chan *channel);
 552static int xlnx_ps_pcie_dma_terminate_all(struct dma_chan *channel);
 553static int read_rootdma_config(struct platform_device *platform_dev,
 554                               struct xlnx_pcie_dma_device *xdev);
 555static int read_epdma_config(struct platform_device *platform_dev,
 556                             struct xlnx_pcie_dma_device *xdev);
 557static int xlnx_pcie_dma_driver_probe(struct platform_device *platform_dev);
 558static int xlnx_pcie_dma_driver_remove(struct platform_device *platform_dev);
 559
 560/* IO accessors */
 561static inline u32 ps_pcie_dma_read(struct ps_pcie_dma_chan *chan, u32 reg)
 562{
 563        return ioread32((void __iomem *)((char *)(chan->chan_base) + reg));
 564}
 565
 566static inline void ps_pcie_dma_write(struct ps_pcie_dma_chan *chan, u32 reg,
 567                                     u32 value)
 568{
 569        iowrite32(value, (void __iomem *)((char *)(chan->chan_base) + reg));
 570}
 571
 572static inline void ps_pcie_dma_clr_mask(struct ps_pcie_dma_chan *chan, u32 reg,
 573                                        u32 mask)
 574{
 575        ps_pcie_dma_write(chan, reg, ps_pcie_dma_read(chan, reg) & ~mask);
 576}
 577
 578static inline void ps_pcie_dma_set_mask(struct ps_pcie_dma_chan *chan, u32 reg,
 579                                        u32 mask)
 580{
 581        ps_pcie_dma_write(chan, reg, ps_pcie_dma_read(chan, reg) | mask);
 582}
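
/*
 * Editorial sketch: typical use of the accessors above.  Masking or unmasking
 * a channel's interrupts is a read-modify-write of the per-channel interrupt
 * control register, whose offset the driver keeps in
 * chan->intr_control_offset (documented above); which concrete register that
 * is depends on channel setup code outside this excerpt.
 *
 *	ps_pcie_dma_set_mask(chan, chan->intr_control_offset,
 *			     DMA_INTCNTRL_ENABLINTR_BIT);	// unmask
 *	ps_pcie_dma_clr_mask(chan, chan->intr_control_offset,
 *			     DMA_INTCNTRL_ENABLINTR_BIT);	// mask
 */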
 583
 584/**
 585 * ps_pcie_dma_dev_intr_handler - This will be invoked for MSI/Legacy interrupts
 586 *
 587 * @irq: IRQ number
  588 * @data: Pointer to the PS PCIe DMA device structure
 589 *
 590 * Return: IRQ_HANDLED/IRQ_NONE
 591 */
 592static irqreturn_t ps_pcie_dma_dev_intr_handler(int irq, void *data)
 593{
 594        struct xlnx_pcie_dma_device *xdev =
 595                (struct xlnx_pcie_dma_device *)data;
 596        struct ps_pcie_dma_chan *chan = NULL;
 597        int i;
 598        int err = -1;
 599        int ret = -1;
 600
 601        for (i = 0; i < xdev->num_channels; i++) {
 602                chan = &xdev->channels[i];
 603                err = ps_pcie_check_intr_status(chan);
 604                if (err == 0)
 605                        ret = 0;
 606        }
 607
 608        return (ret == 0) ? IRQ_HANDLED : IRQ_NONE;
 609}
 610
 611/**
 612 * ps_pcie_dma_chan_intr_handler - This will be invoked for MSI-X interrupts
 613 *
 614 * @irq: IRQ number
 615 * @data: Pointer to the PS PCIe DMA channel structure
 616 *
 617 * Return: IRQ_HANDLED
 618 */
 619static irqreturn_t ps_pcie_dma_chan_intr_handler(int irq, void *data)
 620{
 621        struct ps_pcie_dma_chan *chan = (struct ps_pcie_dma_chan *)data;
 622
 623        ps_pcie_check_intr_status(chan);
 624
 625        return IRQ_HANDLED;
 626}
 627
 628/**
 629 * chan_intr_setup - Requests Interrupt handler for individual channels
 630 *
 631 * @xdev: Driver specific data for device
 632 *
 633 * Return: 0 on success and non zero value on failure.
 634 */
 635static int chan_intr_setup(struct xlnx_pcie_dma_device *xdev)
 636{
 637        struct ps_pcie_dma_chan *chan;
 638        int i;
 639        int err = 0;
 640
 641        for (i = 0; i < xdev->num_channels; i++) {
 642                chan = &xdev->channels[i];
 643                err = devm_request_irq(xdev->dev,
 644                                       pci_irq_vector(xdev->pci_dev, i),
 645                                       ps_pcie_dma_chan_intr_handler,
 646                                       PS_PCIE_DMA_IRQ_NOSHARE,
 647                                       "PS PCIe DMA Chan Intr handler", chan);
 648                if (err) {
 649                        dev_err(xdev->dev,
 650                                "Irq %d for chan %d error %d\n",
 651                                pci_irq_vector(xdev->pci_dev, i),
 652                                chan->channel_number, err);
 653                        break;
 654                }
 655        }
 656
 657        if (err) {
 658                while (--i >= 0) {
 659                        chan = &xdev->channels[i];
 660                        devm_free_irq(xdev->dev,
 661                                      pci_irq_vector(xdev->pci_dev, i), chan);
 662                }
 663        }
 664
 665        return err;
 666}
 667
 668/**
 669 * device_intr_setup - Requests interrupt handler for DMA device
 670 *
 671 * @xdev: Driver specific data for device
 672 *
 673 * Return: 0 on success and non zero value on failure.
 674 */
 675static int device_intr_setup(struct xlnx_pcie_dma_device *xdev)
 676{
 677        int err;
 678        unsigned long intr_flags = IRQF_SHARED;
 679
 680        if (xdev->pci_dev->msix_enabled || xdev->pci_dev->msi_enabled)
 681                intr_flags = PS_PCIE_DMA_IRQ_NOSHARE;
 682
 683        err = devm_request_irq(xdev->dev,
 684                               pci_irq_vector(xdev->pci_dev, 0),
 685                               ps_pcie_dma_dev_intr_handler,
 686                               intr_flags,
 687                               "PS PCIe DMA Intr Handler", xdev);
 688        if (err)
 689                dev_err(xdev->dev, "Couldn't request irq %d\n",
 690                        pci_irq_vector(xdev->pci_dev, 0));
 691
 692        return err;
 693}
 694
 695/**
 696 * irq_setup - Requests interrupts based on the interrupt type detected
 697 *
 698 * @xdev: Driver specific data for device
 699 *
 700 * Return: 0 on success and non zero value on failure.
 701 */
 702static int irq_setup(struct xlnx_pcie_dma_device *xdev)
 703{
 704        int err;
 705
 706        if (xdev->irq_vecs == xdev->num_channels)
 707                err = chan_intr_setup(xdev);
 708        else
 709                err = device_intr_setup(xdev);
 710
 711        return err;
 712}
 713
 714static int platform_irq_setup(struct xlnx_pcie_dma_device *xdev)
 715{
 716        int err;
 717
 718        err = devm_request_irq(xdev->dev,
 719                               xdev->platform_irq_vec,
 720                               ps_pcie_dma_dev_intr_handler,
 721                               IRQF_SHARED,
 722                               "PS PCIe Root DMA Handler", xdev);
 723        if (err)
 724                dev_err(xdev->dev, "Couldn't request irq %d\n",
 725                        xdev->platform_irq_vec);
 726
 727        return err;
 728}
 729
 730/**
 731 * irq_probe - Checks which interrupt types can be serviced by hardware
 732 *
 733 * @xdev: Driver specific data for device
 734 *
 735 * Return: Number of interrupt vectors when successful or -ENOSPC on failure
 736 */
 737static int irq_probe(struct xlnx_pcie_dma_device *xdev)
 738{
 739        struct pci_dev *pdev;
 740
 741        pdev = xdev->pci_dev;
 742
 743        xdev->irq_vecs = pci_alloc_irq_vectors(pdev, 1, xdev->num_channels,
 744                                               PCI_IRQ_ALL_TYPES);
 745        return xdev->irq_vecs;
 746}
 747
 748/**
 749 * ps_pcie_check_intr_status - Checks channel interrupt status
 750 *
 751 * @chan: Pointer to the PS PCIe DMA channel structure
 752 *
 753 * Return: 0 if interrupt is pending on channel
  754 *         -1 if no interrupt is pending on channel
 755 */
 756static int ps_pcie_check_intr_status(struct ps_pcie_dma_chan *chan)
 757{
 758        int err = -1;
 759        u32 status;
 760
 761        if (chan->state != CHANNEL_AVAILABLE)
 762                return err;
 763
 764        status = ps_pcie_dma_read(chan, chan->intr_status_offset);
 765
 766        if (status & DMA_INTSTATUS_SGLINTR_BIT) {
 767                if (chan->primary_desc_cleanup) {
 768                        queue_work(chan->primary_desc_cleanup,
 769                                   &chan->handle_primary_desc_cleanup);
 770                }
 771                /* Clearing Persistent bit */
 772                ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
 773                                     DMA_INTSTATUS_SGLINTR_BIT);
 774                err = 0;
 775        }
 776
 777        if (status & DMA_INTSTATUS_SWINTR_BIT) {
 778                if (chan->sw_intrs_wrkq)
 779                        queue_work(chan->sw_intrs_wrkq, &chan->handle_sw_intrs);
 780                /* Clearing Persistent bit */
 781                ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
 782                                     DMA_INTSTATUS_SWINTR_BIT);
 783                err = 0;
 784        }
 785
 786        if (status & DMA_INTSTATUS_DMAERR_BIT) {
 787                dev_err(chan->dev,
 788                        "DMA Channel %d ControlStatus Reg: 0x%x",
 789                        chan->channel_number, status);
 790                dev_err(chan->dev,
 791                        "Chn %d SrcQLmt = %d SrcQSz = %d SrcQNxt = %d",
 792                        chan->channel_number,
 793                        chan->chan_base->src_q_limit,
 794                        chan->chan_base->src_q_size,
 795                        chan->chan_base->src_q_next);
 796                dev_err(chan->dev,
 797                        "Chn %d SrcStaLmt = %d SrcStaSz = %d SrcStaNxt = %d",
 798                        chan->channel_number,
 799                        chan->chan_base->stas_q_limit,
 800                        chan->chan_base->stas_q_size,
 801                        chan->chan_base->stas_q_next);
 802                dev_err(chan->dev,
 803                        "Chn %d DstQLmt = %d DstQSz = %d DstQNxt = %d",
 804                        chan->channel_number,
 805                        chan->chan_base->dst_q_limit,
 806                        chan->chan_base->dst_q_size,
 807                        chan->chan_base->dst_q_next);
 808                dev_err(chan->dev,
 809                        "Chan %d DstStaLmt = %d DstStaSz = %d DstStaNxt = %d",
 810                        chan->channel_number,
 811                        chan->chan_base->stad_q_limit,
 812                        chan->chan_base->stad_q_size,
 813                        chan->chan_base->stad_q_next);
 814                /* Clearing Persistent bit */
 815                ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
 816                                     DMA_INTSTATUS_DMAERR_BIT);
 817
 818                handle_error(chan);
 819
 820                err = 0;
 821        }
 822
 823        return err;
 824}
 825
 826static int init_hw_components(struct ps_pcie_dma_chan *chan)
 827{
 828        if (chan->psrc_sgl_bd && chan->psrc_sta_bd) {
 829                /*  Programming SourceQ and StatusQ bd addresses */
 830                chan->chan_base->src_q_next = 0;
 831                chan->chan_base->src_q_high =
 832                        upper_32_bits(chan->src_sgl_bd_pa);
 833                chan->chan_base->src_q_size = chan->total_descriptors;
 834                chan->chan_base->src_q_limit = 0;
 835                if (chan->xdev->is_rootdma) {
 836                        chan->chan_base->src_q_low = ROOTDMA_Q_READ_ATTRIBUTE
 837                                                     | DMA_QPTRLO_QLOCAXI_BIT;
 838                } else {
 839                        chan->chan_base->src_q_low = 0;
 840                }
 841                chan->chan_base->src_q_low |=
 842                        (lower_32_bits((chan->src_sgl_bd_pa))
 843                         & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
 844                        | DMA_QPTRLO_Q_ENABLE_BIT;
 845
 846                chan->chan_base->stas_q_next = 0;
 847                chan->chan_base->stas_q_high =
 848                        upper_32_bits(chan->src_sta_bd_pa);
 849                chan->chan_base->stas_q_size = chan->total_descriptors;
 850                chan->chan_base->stas_q_limit = chan->total_descriptors - 1;
 851                if (chan->xdev->is_rootdma) {
 852                        chan->chan_base->stas_q_low = ROOTDMA_Q_READ_ATTRIBUTE
 853                                                      | DMA_QPTRLO_QLOCAXI_BIT;
 854                } else {
 855                        chan->chan_base->stas_q_low = 0;
 856                }
 857                chan->chan_base->stas_q_low |=
 858                        (lower_32_bits(chan->src_sta_bd_pa)
 859                         & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
 860                        | DMA_QPTRLO_Q_ENABLE_BIT;
 861        }
 862
 863        if (chan->pdst_sgl_bd && chan->pdst_sta_bd) {
 864                /*  Programming DestinationQ and StatusQ buffer descriptors */
 865                chan->chan_base->dst_q_next = 0;
 866                chan->chan_base->dst_q_high =
 867                        upper_32_bits(chan->dst_sgl_bd_pa);
 868                chan->chan_base->dst_q_size = chan->total_descriptors;
 869                chan->chan_base->dst_q_limit = 0;
 870                if (chan->xdev->is_rootdma) {
 871                        chan->chan_base->dst_q_low = ROOTDMA_Q_READ_ATTRIBUTE
 872                                                     | DMA_QPTRLO_QLOCAXI_BIT;
 873                } else {
 874                        chan->chan_base->dst_q_low = 0;
 875                }
 876                chan->chan_base->dst_q_low |=
 877                        (lower_32_bits(chan->dst_sgl_bd_pa)
 878                         & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
 879                        | DMA_QPTRLO_Q_ENABLE_BIT;
 880
 881                chan->chan_base->stad_q_next = 0;
 882                chan->chan_base->stad_q_high =
 883                        upper_32_bits(chan->dst_sta_bd_pa);
 884                chan->chan_base->stad_q_size = chan->total_descriptors;
 885                chan->chan_base->stad_q_limit = chan->total_descriptors - 1;
 886                if (chan->xdev->is_rootdma) {
 887                        chan->chan_base->stad_q_low = ROOTDMA_Q_READ_ATTRIBUTE
 888                                                      | DMA_QPTRLO_QLOCAXI_BIT;
 889                } else {
 890                        chan->chan_base->stad_q_low = 0;
 891                }
 892                chan->chan_base->stad_q_low |=
 893                        (lower_32_bits(chan->dst_sta_bd_pa)
 894                         & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
 895                        | DMA_QPTRLO_Q_ENABLE_BIT;
 896        }
 897
 898        return 0;
 899}
 900
 901static void update_channel_read_attribute(struct ps_pcie_dma_chan *chan)
 902{
 903        if (chan->xdev->is_rootdma) {
 904                /* For Root DMA, Host Memory and Buffer Descriptors
 905                 * will be on AXI side
 906                 */
 907                if (chan->srcq_buffer_location == BUFFER_LOC_PCI) {
 908                        chan->read_attribute = (AXI_ATTRIBUTE <<
 909                                                SRC_CTL_ATTRIB_BIT_SHIFT) |
 910                                                SOURCE_CONTROL_BD_LOC_AXI;
 911                } else if (chan->srcq_buffer_location == BUFFER_LOC_AXI) {
 912                        chan->read_attribute = AXI_ATTRIBUTE <<
 913                                               SRC_CTL_ATTRIB_BIT_SHIFT;
 914                }
 915        } else {
 916                if (chan->srcq_buffer_location == BUFFER_LOC_PCI) {
 917                        chan->read_attribute = PCI_ATTRIBUTE <<
 918                                               SRC_CTL_ATTRIB_BIT_SHIFT;
 919                } else if (chan->srcq_buffer_location == BUFFER_LOC_AXI) {
 920                        chan->read_attribute = (AXI_ATTRIBUTE <<
 921                                                SRC_CTL_ATTRIB_BIT_SHIFT) |
 922                                                SOURCE_CONTROL_BD_LOC_AXI;
 923                }
 924        }
 925}
 926
 927static void update_channel_write_attribute(struct ps_pcie_dma_chan *chan)
 928{
 929        if (chan->xdev->is_rootdma) {
 930                /* For Root DMA, Host Memory and Buffer Descriptors
 931                 * will be on AXI side
 932                 */
 933                if (chan->dstq_buffer_location == BUFFER_LOC_PCI) {
 934                        chan->write_attribute = (AXI_ATTRIBUTE <<
 935                                                 SRC_CTL_ATTRIB_BIT_SHIFT) |
 936                                                SOURCE_CONTROL_BD_LOC_AXI;
  937                } else if (chan->dstq_buffer_location == BUFFER_LOC_AXI) {
 938                        chan->write_attribute = AXI_ATTRIBUTE <<
 939                                                SRC_CTL_ATTRIB_BIT_SHIFT;
 940                }
 941        } else {
 942                if (chan->dstq_buffer_location == BUFFER_LOC_PCI) {
 943                        chan->write_attribute = PCI_ATTRIBUTE <<
 944                                                SRC_CTL_ATTRIB_BIT_SHIFT;
 945                } else if (chan->dstq_buffer_location == BUFFER_LOC_AXI) {
 946                        chan->write_attribute = (AXI_ATTRIBUTE <<
 947                                                 SRC_CTL_ATTRIB_BIT_SHIFT) |
 948                                                SOURCE_CONTROL_BD_LOC_AXI;
 949                }
 950        }
 951        chan->write_attribute |= SOURCE_CONTROL_BACK_TO_BACK_PACK_BIT;
 952}
 953
 954static int init_sw_components(struct ps_pcie_dma_chan *chan)
 955{
 956        if ((chan->ppkt_ctx_srcq) && (chan->psrc_sgl_bd) &&
 957            (chan->psrc_sta_bd)) {
 958                memset(chan->ppkt_ctx_srcq, 0,
 959                       sizeof(struct PACKET_TRANSFER_PARAMS)
 960                       * chan->total_descriptors);
 961
 962                memset(chan->psrc_sgl_bd, 0,
 963                       sizeof(struct SOURCE_DMA_DESCRIPTOR)
 964                       * chan->total_descriptors);
 965
 966                memset(chan->psrc_sta_bd, 0,
 967                       sizeof(struct STATUS_DMA_DESCRIPTOR)
 968                       * chan->total_descriptors);
 969
 970                chan->src_avail_descriptors = chan->total_descriptors;
 971
 972                chan->src_sgl_freeidx = 0;
 973                chan->src_staprobe_idx = 0;
 974                chan->src_sta_hw_probe_idx = chan->total_descriptors - 1;
 975                chan->idx_ctx_srcq_head = 0;
 976                chan->idx_ctx_srcq_tail = 0;
 977        }
 978
 979        if ((chan->ppkt_ctx_dstq) && (chan->pdst_sgl_bd) &&
 980            (chan->pdst_sta_bd)) {
 981                memset(chan->ppkt_ctx_dstq, 0,
 982                       sizeof(struct PACKET_TRANSFER_PARAMS)
 983                       * chan->total_descriptors);
 984
 985                memset(chan->pdst_sgl_bd, 0,
 986                       sizeof(struct DEST_DMA_DESCRIPTOR)
 987                       * chan->total_descriptors);
 988
 989                memset(chan->pdst_sta_bd, 0,
 990                       sizeof(struct STATUS_DMA_DESCRIPTOR)
 991                       * chan->total_descriptors);
 992
 993                chan->dst_avail_descriptors = chan->total_descriptors;
 994
 995                chan->dst_sgl_freeidx = 0;
 996                chan->dst_staprobe_idx = 0;
 997                chan->dst_sta_hw_probe_idx = chan->total_descriptors - 1;
 998                chan->idx_ctx_dstq_head = 0;
 999                chan->idx_ctx_dstq_tail = 0;
1000        }
1001
1002        return 0;
1003}
1004
1005/**
1006 * ps_pcie_chan_reset - Resets channel, by programming relevant registers
1007 *
1008 * @chan: PS PCIe DMA channel information holder
1009 * Return: void
1010 */
1011static void ps_pcie_chan_reset(struct ps_pcie_dma_chan *chan)
1012{
1013        /* Enable channel reset */
1014        ps_pcie_dma_set_mask(chan, DMA_CNTRL_REG_OFFSET, DMA_CNTRL_RST_BIT);
1015
1016        mdelay(10);
1017
1018        /* Disable channel reset */
1019        ps_pcie_dma_clr_mask(chan, DMA_CNTRL_REG_OFFSET, DMA_CNTRL_RST_BIT);
1020}
1021
1022/**
1023 * poll_completed_transactions - Function invoked by poll timer
1024 *
1025 * @arg: Pointer to PS PCIe DMA channel information
1026 * Return: void
1027 */
1028static void poll_completed_transactions(unsigned long arg)
1029{
1030        struct ps_pcie_dma_chan *chan = (struct ps_pcie_dma_chan *)arg;
1031
1032        if (chan->state == CHANNEL_AVAILABLE) {
1033                queue_work(chan->primary_desc_cleanup,
1034                           &chan->handle_primary_desc_cleanup);
1035        }
1036
1037        mod_timer(&chan->poll_timer, jiffies + chan->poll_timer_freq);
1038}
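
/*
 * Editorial sketch: with a non-zero coalesce_count the channel uses this poll
 * timer to pick up completions that do not raise an immediate interrupt.
 * Under the pre-4.15 timer API implied by the handler signature above, arming
 * it presumably looks like this (the driver's own
 * xlnx_ps_pcie_alloc_poll_timer() is outside this excerpt):
 *
 *	setup_timer(&chan->poll_timer, poll_completed_transactions,
 *		    (unsigned long)chan);
 *	mod_timer(&chan->poll_timer, jiffies + chan->poll_timer_freq);
 */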
1039
1040static bool check_descriptors_for_two_queues(struct ps_pcie_dma_chan *chan,
1041                                             struct ps_pcie_tx_segment *seg)
1042{
1043        if (seg->tx_elements.src_sgl) {
1044                if (chan->src_avail_descriptors >=
1045                    seg->tx_elements.srcq_num_elemets) {
1046                        return true;
1047                }
1048        } else if (seg->tx_elements.dst_sgl) {
1049                if (chan->dst_avail_descriptors >=
1050                    seg->tx_elements.dstq_num_elemets) {
1051                        return true;
1052                }
1053        }
1054
1055        return false;
1056}
1057
1058static bool check_descriptors_for_all_queues(struct ps_pcie_dma_chan *chan,
1059                                             struct ps_pcie_tx_segment *seg)
1060{
1061        if ((chan->src_avail_descriptors >=
1062                seg->tx_elements.srcq_num_elemets) &&
1063            (chan->dst_avail_descriptors >=
1064                seg->tx_elements.dstq_num_elemets)) {
1065                return true;
1066        }
1067
1068        return false;
1069}
1070
1071static bool check_descriptor_availability(struct ps_pcie_dma_chan *chan,
1072                                          struct ps_pcie_tx_segment *seg)
1073{
1074        if (chan->num_queues == DEFAULT_DMA_QUEUES)
1075                return check_descriptors_for_all_queues(chan, seg);
1076        else
1077                return check_descriptors_for_two_queues(chan, seg);
1078}
1079
1080static void handle_error(struct ps_pcie_dma_chan *chan)
1081{
1082        if (chan->state != CHANNEL_AVAILABLE)
1083                return;
1084
1085        spin_lock(&chan->channel_lock);
1086        chan->state = CHANNEL_ERROR;
1087        spin_unlock(&chan->channel_lock);
1088
1089        if (chan->maintenance_workq)
1090                queue_work(chan->maintenance_workq, &chan->handle_chan_reset);
1091}
1092
1093static void xlnx_ps_pcie_update_srcq(struct ps_pcie_dma_chan *chan,
1094                                     struct ps_pcie_tx_segment *seg)
1095{
1096        struct SOURCE_DMA_DESCRIPTOR *pdesc;
1097        struct PACKET_TRANSFER_PARAMS *pkt_ctx = NULL;
1098        struct scatterlist *sgl_ptr;
1099        unsigned int i;
1100
1101        pkt_ctx = chan->ppkt_ctx_srcq + chan->idx_ctx_srcq_head;
1102        if (pkt_ctx->availability_status == IN_USE) {
1103                dev_err(chan->dev,
1104                        "src pkt context not avail for channel %d\n",
1105                        chan->channel_number);
1106                handle_error(chan);
1107                return;
1108        }
1109
1110        pkt_ctx->availability_status = IN_USE;
1111        pkt_ctx->sgl = seg->tx_elements.src_sgl;
1112
1113        if (chan->srcq_buffer_location == BUFFER_LOC_PCI)
1114                pkt_ctx->seg = seg;
1115
1116        /*  Get the address of the next available DMA Descriptor */
1117        pdesc = chan->psrc_sgl_bd + chan->src_sgl_freeidx;
1118        pkt_ctx->idx_sop = chan->src_sgl_freeidx;
1119
1120        /* Build transactions using information in the scatter gather list */
1121        for_each_sg(seg->tx_elements.src_sgl, sgl_ptr,
1122                    seg->tx_elements.srcq_num_elemets, i) {
1123                if (chan->xdev->dma_buf_ext_addr) {
1124                        pdesc->system_address =
1125                                (u64)sg_dma_address(sgl_ptr);
1126                } else {
1127                        pdesc->system_address =
1128                                (u32)sg_dma_address(sgl_ptr);
1129                }
1130
1131                pdesc->control_byte_count = (sg_dma_len(sgl_ptr) &
1132                                            SOURCE_CONTROL_BD_BYTE_COUNT_MASK) |
1133                                            chan->read_attribute;
1134                if (pkt_ctx->seg)
1135                        pkt_ctx->requested_bytes += sg_dma_len(sgl_ptr);
1136
1137                pdesc->user_handle = chan->idx_ctx_srcq_head;
1138                pdesc->user_id = DEFAULT_UID;
1139                /* Check if this is last descriptor */
1140                if (i == (seg->tx_elements.srcq_num_elemets - 1)) {
1141                        pkt_ctx->idx_eop = chan->src_sgl_freeidx;
1142                        pdesc->control_byte_count = pdesc->control_byte_count |
1143                                                SOURCE_CONTROL_BD_EOP_BIT |
1144                                                SOURCE_CONTROL_BD_INTR_BIT;
1145                }
1146                chan->src_sgl_freeidx++;
1147                if (chan->src_sgl_freeidx == chan->total_descriptors)
1148                        chan->src_sgl_freeidx = 0;
1149                pdesc = chan->psrc_sgl_bd + chan->src_sgl_freeidx;
1150                spin_lock(&chan->src_desc_lock);
1151                chan->src_avail_descriptors--;
1152                spin_unlock(&chan->src_desc_lock);
1153        }
1154
1155        chan->chan_base->src_q_limit = chan->src_sgl_freeidx;
1156        chan->idx_ctx_srcq_head++;
1157        if (chan->idx_ctx_srcq_head == chan->total_descriptors)
1158                chan->idx_ctx_srcq_head = 0;
1159}
1160
1161static void xlnx_ps_pcie_update_dstq(struct ps_pcie_dma_chan *chan,
1162                                     struct ps_pcie_tx_segment *seg)
1163{
1164        struct DEST_DMA_DESCRIPTOR *pdesc;
1165        struct PACKET_TRANSFER_PARAMS *pkt_ctx = NULL;
1166        struct scatterlist *sgl_ptr;
1167        unsigned int i;
1168
1169        pkt_ctx = chan->ppkt_ctx_dstq + chan->idx_ctx_dstq_head;
1170        if (pkt_ctx->availability_status == IN_USE) {
1171                dev_err(chan->dev,
1172                        "dst pkt context not avail for channel %d\n",
1173                        chan->channel_number);
1174                handle_error(chan);
1175
1176                return;
1177        }
1178
1179        pkt_ctx->availability_status = IN_USE;
1180        pkt_ctx->sgl = seg->tx_elements.dst_sgl;
1181
1182        if (chan->dstq_buffer_location == BUFFER_LOC_PCI)
1183                pkt_ctx->seg = seg;
1184
1185        pdesc = chan->pdst_sgl_bd + chan->dst_sgl_freeidx;
1186        pkt_ctx->idx_sop = chan->dst_sgl_freeidx;
1187
1188        /* Build transactions using information in the scatter gather list */
1189        for_each_sg(seg->tx_elements.dst_sgl, sgl_ptr,
1190                    seg->tx_elements.dstq_num_elemets, i) {
1191                if (chan->xdev->dma_buf_ext_addr) {
1192                        pdesc->system_address =
1193                                (u64)sg_dma_address(sgl_ptr);
1194                } else {
1195                        pdesc->system_address =
1196                                (u32)sg_dma_address(sgl_ptr);
1197                }
1198
1199                pdesc->control_byte_count = (sg_dma_len(sgl_ptr) &
1200                                        SOURCE_CONTROL_BD_BYTE_COUNT_MASK) |
1201                                                chan->write_attribute;
1202
1203                if (pkt_ctx->seg)
1204                        pkt_ctx->requested_bytes += sg_dma_len(sgl_ptr);
1205
1206                pdesc->user_handle = chan->idx_ctx_dstq_head;
1207                /* Check if this is last descriptor */
1208                if (i == (seg->tx_elements.dstq_num_elemets - 1))
1209                        pkt_ctx->idx_eop = chan->dst_sgl_freeidx;
1210                chan->dst_sgl_freeidx++;
1211                if (chan->dst_sgl_freeidx == chan->total_descriptors)
1212                        chan->dst_sgl_freeidx = 0;
1213                pdesc = chan->pdst_sgl_bd + chan->dst_sgl_freeidx;
1214                spin_lock(&chan->dst_desc_lock);
1215                chan->dst_avail_descriptors--;
1216                spin_unlock(&chan->dst_desc_lock);
1217        }
1218
1219        chan->chan_base->dst_q_limit = chan->dst_sgl_freeidx;
1220        chan->idx_ctx_dstq_head++;
1221        if (chan->idx_ctx_dstq_head == chan->total_descriptors)
1222                chan->idx_ctx_dstq_head = 0;
1223}
1224
1225static void ps_pcie_chan_program_work(struct work_struct *work)
1226{
1227        struct ps_pcie_dma_chan *chan =
1228                (struct ps_pcie_dma_chan *)container_of(work,
1229                                struct ps_pcie_dma_chan,
1230                                handle_chan_programming);
1231        struct ps_pcie_tx_segment *seg = NULL;
1232
1233        while (chan->state == CHANNEL_AVAILABLE) {
1234                spin_lock(&chan->active_list_lock);
1235                seg = list_first_entry_or_null(&chan->active_list,
1236                                               struct ps_pcie_tx_segment, node);
1237                spin_unlock(&chan->active_list_lock);
1238
1239                if (!seg)
1240                        break;
1241
1242                if (check_descriptor_availability(chan, seg) == false)
1243                        break;
1244
1245                spin_lock(&chan->active_list_lock);
1246                list_del(&seg->node);
1247                spin_unlock(&chan->active_list_lock);
1248
1249                if (seg->tx_elements.src_sgl)
1250                        xlnx_ps_pcie_update_srcq(chan, seg);
1251
1252                if (seg->tx_elements.dst_sgl)
1253                        xlnx_ps_pcie_update_dstq(chan, seg);
1254        }
1255}
1256
1257/**
1258 * dst_cleanup_work - Goes through all completed elements in status Q
1259 * and invokes callbacks for the concerned DMA transaction.
1260 *
1261 * @work: Work associated with the task
1262 *
1263 * Return: void
1264 */
1265static void dst_cleanup_work(struct work_struct *work)
1266{
1267        struct ps_pcie_dma_chan *chan =
1268                (struct ps_pcie_dma_chan *)container_of(work,
1269                        struct ps_pcie_dma_chan, handle_dstq_desc_cleanup);
1270
1271        struct STATUS_DMA_DESCRIPTOR *psta_bd;
1272        struct DEST_DMA_DESCRIPTOR *pdst_bd;
1273        struct PACKET_TRANSFER_PARAMS *ppkt_ctx;
1274        struct dmaengine_result rslt;
1275        u32 completed_bytes;
1276        u32 dstq_desc_idx;
1277
1278        psta_bd = chan->pdst_sta_bd + chan->dst_staprobe_idx;
1279
1280        while (psta_bd->status_flag_byte_count & STA_BD_COMPLETED_BIT) {
1281                if (psta_bd->status_flag_byte_count &
1282                                STA_BD_DESTINATION_ERROR_BIT) {
1283                        dev_err(chan->dev,
1284                                "Dst Sts Elmnt %d chan %d has Destination Err",
1285                                chan->dst_staprobe_idx + 1,
1286                                chan->channel_number);
1287                        handle_error(chan);
1288                        break;
1289                }
1290                if (psta_bd->status_flag_byte_count & STA_BD_SOURCE_ERROR_BIT) {
1291                        dev_err(chan->dev,
1292                                "Dst Sts Elmnt %d chan %d has Source Error",
1293                                chan->dst_staprobe_idx + 1,
1294                                chan->channel_number);
1295                        handle_error(chan);
1296                        break;
1297                }
1298                if (psta_bd->status_flag_byte_count &
1299                                STA_BD_INTERNAL_ERROR_BIT) {
1300                        dev_err(chan->dev,
1301                                "Dst Sts Elmnt %d chan %d has Internal Error",
1302                                chan->dst_staprobe_idx + 1,
1303                                chan->channel_number);
1304                        handle_error(chan);
1305                        break;
1306                }
1307                /* 64-bit status elements: the upper status word must be non-zero */
1308                if ((psta_bd->status_flag_byte_count &
1309                                        STA_BD_UPPER_STATUS_NONZERO_BIT) == 0) {
1310                        dev_err(chan->dev,
1311                                "Dst Sts Elmnt %d for chan %d has zero upper status",
1312                                chan->dst_staprobe_idx + 1,
1313                                chan->channel_number);
1314                        handle_error(chan);
1315                        break;
1316                }
1317
1318                chan->idx_ctx_dstq_tail = psta_bd->user_handle;
1319                ppkt_ctx = chan->ppkt_ctx_dstq + chan->idx_ctx_dstq_tail;
1320                completed_bytes = (psta_bd->status_flag_byte_count &
1321                                        STA_BD_BYTE_COUNT_MASK) >>
1322                                                STA_BD_BYTE_COUNT_SHIFT;
1323
1324                memset(psta_bd, 0, sizeof(struct STATUS_DMA_DESCRIPTOR));
1325
1326                chan->dst_staprobe_idx++;
1327
1328                if (chan->dst_staprobe_idx == chan->total_descriptors)
1329                        chan->dst_staprobe_idx = 0;
1330
1331                chan->dst_sta_hw_probe_idx++;
1332
1333                if (chan->dst_sta_hw_probe_idx == chan->total_descriptors)
1334                        chan->dst_sta_hw_probe_idx = 0;
1335
1336                chan->chan_base->stad_q_limit = chan->dst_sta_hw_probe_idx;
1337
1338                psta_bd = chan->pdst_sta_bd + chan->dst_staprobe_idx;
1339
1340                dstq_desc_idx = ppkt_ctx->idx_sop;
1341
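                /*
                 * Recycle the destination SGL descriptors used by this
                 * packet, from its SOP index through its EOP index.
                 */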
1342                do {
1343                        pdst_bd = chan->pdst_sgl_bd + dstq_desc_idx;
1344                        memset(pdst_bd, 0,
1345                               sizeof(struct DEST_DMA_DESCRIPTOR));
1346
1347                        spin_lock(&chan->dst_desc_lock);
1348                        chan->dst_avail_descriptors++;
1349                        spin_unlock(&chan->dst_desc_lock);
1350
1351                        if (dstq_desc_idx == ppkt_ctx->idx_eop)
1352                                break;
1353
1354                        dstq_desc_idx++;
1355
1356                        if (dstq_desc_idx == chan->total_descriptors)
1357                                dstq_desc_idx = 0;
1358
1359                } while (1);
1360
1361                /* Invoking callback */
1362                if (ppkt_ctx->seg) {
1363                        spin_lock(&chan->cookie_lock);
1364                        dma_cookie_complete(&ppkt_ctx->seg->async_tx);
1365                        spin_unlock(&chan->cookie_lock);
1366                        rslt.result = DMA_TRANS_NOERROR;
1367                        rslt.residue = ppkt_ctx->requested_bytes -
1368                                        completed_bytes;
1369                        dmaengine_desc_get_callback_invoke(&ppkt_ctx->seg->async_tx,
1370                                                           &rslt);
1371                        mempool_free(ppkt_ctx->seg, chan->transactions_pool);
1372                }
1373                memset(ppkt_ctx, 0, sizeof(struct PACKET_TRANSFER_PARAMS));
1374        }
1375
1376        complete(&chan->dstq_work_complete);
1377}
1378
1379/**
1380 * src_cleanup_work - Walks the completed elements in the source status
1381 * queue and invokes the callback for each finished DMA transaction.
1382 *
1383 * @work: Work associated with the task
1384 *
1385 * Return: void
1386 */
1387static void src_cleanup_work(struct work_struct *work)
1388{
1389        struct ps_pcie_dma_chan *chan =
1390                (struct ps_pcie_dma_chan *)container_of(
1391                work, struct ps_pcie_dma_chan, handle_srcq_desc_cleanup);
1392
1393        struct STATUS_DMA_DESCRIPTOR *psta_bd;
1394        struct SOURCE_DMA_DESCRIPTOR *psrc_bd;
1395        struct PACKET_TRANSFER_PARAMS *ppkt_ctx;
1396        struct dmaengine_result rslt;
1397        u32 completed_bytes;
1398        u32 srcq_desc_idx;
1399
1400        psta_bd = chan->psrc_sta_bd + chan->src_staprobe_idx;
1401
1402        while (psta_bd->status_flag_byte_count & STA_BD_COMPLETED_BIT) {
1403                if (psta_bd->status_flag_byte_count &
1404                                STA_BD_DESTINATION_ERROR_BIT) {
1405                        dev_err(chan->dev,
1406                                "Src Sts Elmnt %d chan %d has Dst Error",
1407                                chan->src_staprobe_idx + 1,
1408                                chan->channel_number);
1409                        handle_error(chan);
1410                        break;
1411                }
1412                if (psta_bd->status_flag_byte_count & STA_BD_SOURCE_ERROR_BIT) {
1413                        dev_err(chan->dev,
1414                                "Src Sts Elmnt %d chan %d has Source Error",
1415                                chan->src_staprobe_idx + 1,
1416                                chan->channel_number);
1417                        handle_error(chan);
1418                        break;
1419                }
1420                if (psta_bd->status_flag_byte_count &
1421                                STA_BD_INTERNAL_ERROR_BIT) {
1422                        dev_err(chan->dev,
1423                                "Src Sts Elmnt %d chan %d has Internal Error",
1424                                chan->src_staprobe_idx + 1,
1425                                chan->channel_number);
1426                        handle_error(chan);
1427                        break;
1428                }
1429                if ((psta_bd->status_flag_byte_count
1430                                & STA_BD_UPPER_STATUS_NONZERO_BIT) == 0) {
1431                        dev_err(chan->dev,
1432                                "Src Sts Elmnt %d chan %d has zero upper status",
1433                                chan->src_staprobe_idx + 1,
1434                                chan->channel_number);
1435                        handle_error(chan);
1436                        break;
1437                }
1438                chan->idx_ctx_srcq_tail = psta_bd->user_handle;
1439                ppkt_ctx = chan->ppkt_ctx_srcq + chan->idx_ctx_srcq_tail;
1440                completed_bytes = (psta_bd->status_flag_byte_count
1441                                        & STA_BD_BYTE_COUNT_MASK) >>
1442                                                STA_BD_BYTE_COUNT_SHIFT;
1443
1444                memset(psta_bd, 0, sizeof(struct STATUS_DMA_DESCRIPTOR));
1445
1446                chan->src_staprobe_idx++;
1447
1448                if (chan->src_staprobe_idx == chan->total_descriptors)
1449                        chan->src_staprobe_idx = 0;
1450
1451                chan->src_sta_hw_probe_idx++;
1452
1453                if (chan->src_sta_hw_probe_idx == chan->total_descriptors)
1454                        chan->src_sta_hw_probe_idx = 0;
1455
1456                chan->chan_base->stas_q_limit = chan->src_sta_hw_probe_idx;
1457
1458                psta_bd = chan->psrc_sta_bd + chan->src_staprobe_idx;
1459
1460                srcq_desc_idx = ppkt_ctx->idx_sop;
1461
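                /*
                 * Recycle the source SGL descriptors used by this packet,
                 * from its SOP index through its EOP index.
                 */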
1462                do {
1463                        psrc_bd = chan->psrc_sgl_bd + srcq_desc_idx;
1464                        memset(psrc_bd, 0,
1465                               sizeof(struct SOURCE_DMA_DESCRIPTOR));
1466
1467                        spin_lock(&chan->src_desc_lock);
1468                        chan->src_avail_descriptors++;
1469                        spin_unlock(&chan->src_desc_lock);
1470
1471                        if (srcq_desc_idx == ppkt_ctx->idx_eop)
1472                                break;
1473                        srcq_desc_idx++;
1474
1475                        if (srcq_desc_idx == chan->total_descriptors)
1476                                srcq_desc_idx = 0;
1477
1478                } while (1);
1479
1480                /* Invoking callback */
1481                if (ppkt_ctx->seg) {
1482                        spin_lock(&chan->cookie_lock);
1483                        dma_cookie_complete(&ppkt_ctx->seg->async_tx);
1484                        spin_unlock(&chan->cookie_lock);
1485                        rslt.result = DMA_TRANS_NOERROR;
1486                        rslt.residue = ppkt_ctx->requested_bytes -
1487                                        completed_bytes;
1488                        dmaengine_desc_get_callback_invoke(&ppkt_ctx->seg->async_tx,
1489                                                           &rslt);
1490                        mempool_free(ppkt_ctx->seg, chan->transactions_pool);
1491                }
1492                memset(ppkt_ctx, 0, sizeof(struct PACKET_TRANSFER_PARAMS));
1493        }
1494
1495        complete(&chan->srcq_work_complete);
1496}
1497
1498/**
1499 * ps_pcie_chan_primary_work - Masks out channel interrupts, invokes source Q
1500 * and destination Q cleanup, waits for both to complete and re-enables
1501 * interrupts. The same work is scheduled by the poll timer when the coalesce
1502 * count is greater than zero and no interrupt arrives before the timeout.
1503 *
1504 * @work: Work associated with the task
1505 *
1506 * Return: void
1507 */
1508static void ps_pcie_chan_primary_work(struct work_struct *work)
1509{
1510        struct ps_pcie_dma_chan *chan =
1511                (struct ps_pcie_dma_chan *)container_of(
1512                                work, struct ps_pcie_dma_chan,
1513                                handle_primary_desc_cleanup);
1514
1515        /* Disable interrupts for Channel */
1516        ps_pcie_dma_clr_mask(chan, chan->intr_control_offset,
1517                             DMA_INTCNTRL_ENABLINTR_BIT);
1518
1519        if (chan->psrc_sgl_bd) {
1520                reinit_completion(&chan->srcq_work_complete);
1521                if (chan->srcq_desc_cleanup)
1522                        queue_work(chan->srcq_desc_cleanup,
1523                                   &chan->handle_srcq_desc_cleanup);
1524        }
1525        if (chan->pdst_sgl_bd) {
1526                reinit_completion(&chan->dstq_work_complete);
1527                if (chan->dstq_desc_cleanup)
1528                        queue_work(chan->dstq_desc_cleanup,
1529                                   &chan->handle_dstq_desc_cleanup);
1530        }
1531
1532        if (chan->psrc_sgl_bd)
1533                wait_for_completion_interruptible(&chan->srcq_work_complete);
1534        if (chan->pdst_sgl_bd)
1535                wait_for_completion_interruptible(&chan->dstq_work_complete);
1536
1537        /* Enable interrupts for channel */
1538        ps_pcie_dma_set_mask(chan, chan->intr_control_offset,
1539                             DMA_INTCNTRL_ENABLINTR_BIT);
1540
1541        if (chan->chan_programming) {
1542                queue_work(chan->chan_programming,
1543                           &chan->handle_chan_programming);
1544        }
1545
1546        if (chan->coalesce_count > 0 && chan->poll_timer.function)
1547                mod_timer(&chan->poll_timer, jiffies + chan->poll_timer_freq);
1548}
1549
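/*
 * read_rootdma_config - Root DMA platform setup: configures 64-bit (with a
 * 32-bit fallback) streaming and coherent DMA masks, maps the
 * "ps_pcie_regbase" register resource, fetches the "ps_pcie_rootdma_intr"
 * interrupt and reads the dma_vendorid/dma_deviceid device properties.
 */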
1550static int read_rootdma_config(struct platform_device *platform_dev,
1551                               struct xlnx_pcie_dma_device *xdev)
1552{
1553        int err;
1554        struct resource *r;
1555
1556        err = dma_set_mask(&platform_dev->dev, DMA_BIT_MASK(64));
1557        if (err) {
1558                dev_info(&platform_dev->dev, "Cannot set 64 bit DMA mask\n");
1559                err = dma_set_mask(&platform_dev->dev, DMA_BIT_MASK(32));
1560                if (err) {
1561                        dev_err(&platform_dev->dev, "DMA mask set error\n");
1562                        return err;
1563                }
1564        }
1565
1566        err = dma_set_coherent_mask(&platform_dev->dev, DMA_BIT_MASK(64));
1567        if (err) {
1568                dev_info(&platform_dev->dev, "Cannot set 64 bit consistent DMA mask\n");
1569                err = dma_set_coherent_mask(&platform_dev->dev,
1570                                            DMA_BIT_MASK(32));
1571                if (err) {
1572                        dev_err(&platform_dev->dev, "Cannot set consistent DMA mask\n");
1573                        return err;
1574                }
1575        }
1576
1577        r = platform_get_resource_byname(platform_dev, IORESOURCE_MEM,
1578                                         "ps_pcie_regbase");
1579        if (!r) {
1580                dev_err(&platform_dev->dev,
1581                        "Unable to find memory resource for root dma\n");
1582                return -ENODEV;
1583        }
1584
1585        xdev->reg_base = devm_ioremap_resource(&platform_dev->dev, r);
1586        if (IS_ERR(xdev->reg_base)) {
1587                dev_err(&platform_dev->dev, "ioresource error for root dma\n");
1588                return PTR_ERR(xdev->reg_base);
1589        }
1590
1591        xdev->platform_irq_vec =
1592                platform_get_irq_byname(platform_dev,
1593                                        "ps_pcie_rootdma_intr");
1594        if (xdev->platform_irq_vec < 0) {
1595                dev_err(&platform_dev->dev,
1596                        "Unable to get interrupt number for root dma\n");
1597                return xdev->platform_irq_vec;
1598        }
1599
1600        err = device_property_read_u16(&platform_dev->dev, "dma_vendorid",
1601                                       &xdev->rootdma_vendor);
1602        if (err) {
1603                dev_err(&platform_dev->dev,
1604                        "Unable to find RootDMA PCI Vendor Id\n");
1605                return err;
1606        }
1607
1608        err = device_property_read_u16(&platform_dev->dev, "dma_deviceid",
1609                                       &xdev->rootdma_device);
1610        if (err) {
1611                dev_err(&platform_dev->dev,
1612                        "Unable to find RootDMA PCI Device Id\n");
1613                return err;
1614        }
1615
1616        xdev->common.dev = xdev->dev;
1617
1618        return 0;
1619}
1620
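/*
 * read_epdma_config - Endpoint DMA setup: takes the struct pci_dev handed in
 * through platform_data, requests and iomaps every populated BAR, records the
 * BAR lengths and addresses, uses DMA_BAR_NUMBER as the DMA register base and
 * probes the interrupt lines.
 */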
1621static int read_epdma_config(struct platform_device *platform_dev,
1622                             struct xlnx_pcie_dma_device *xdev)
1623{
1624        int err;
1625        struct pci_dev *pdev;
1626        u16 i;
1627        void __iomem * const *pci_iomap;
1628        unsigned long pci_bar_length;
1629
1630        pdev = *((struct pci_dev **)(platform_dev->dev.platform_data));
1631        xdev->pci_dev = pdev;
1632
1633        for (i = 0; i < MAX_BARS; i++) {
1634                if (pci_resource_len(pdev, i) == 0)
1635                        continue;
1636                xdev->bar_mask |= BIT(i);
1637        }
1638
1639        err = pcim_iomap_regions(pdev, xdev->bar_mask, PLATFORM_DRIVER_NAME);
1640        if (err) {
1641                dev_err(&pdev->dev, "Cannot request PCI regions, aborting\n");
1642                return err;
1643        }
1644
1645        pci_iomap = pcim_iomap_table(pdev);
1646        if (!pci_iomap) {
1647                err = -ENOMEM;
1648                return err;
1649        }
1650
1651        for (i = 0; i < MAX_BARS; i++) {
1652                pci_bar_length = pci_resource_len(pdev, i);
1653                if (pci_bar_length == 0) {
1654                        xdev->bar_info[i].BAR_LENGTH = 0;
1655                        xdev->bar_info[i].BAR_PHYS_ADDR = 0;
1656                        xdev->bar_info[i].BAR_VIRT_ADDR = NULL;
1657                } else {
1658                        xdev->bar_info[i].BAR_LENGTH =
1659                                pci_bar_length;
1660                        xdev->bar_info[i].BAR_PHYS_ADDR =
1661                                pci_resource_start(pdev, i);
1662                        xdev->bar_info[i].BAR_VIRT_ADDR =
1663                                pci_iomap[i];
1664                }
1665        }
1666
1667        xdev->reg_base = pci_iomap[DMA_BAR_NUMBER];
1668
1669        err = irq_probe(xdev);
1670        if (err < 0) {
1671                dev_err(&pdev->dev, "Cannot probe irq lines for device %d\n",
1672                        platform_dev->id);
1673                return err;
1674        }
1675
1676        xdev->common.dev = &pdev->dev;
1677
1678        return 0;
1679}
1680
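/*
 * probe_channel_properties - Initializes the per-channel locks, lists and
 * completions, parses the "ps_pcie_channel<N>" device property array,
 * derives the buffer locations and per-channel register base, verifies that
 * the hardware reports the channel as present, and publishes a
 * ps_pcie_dma_channel_match through dma_chan->private for client filters.
 *
 * The property array is assumed to carry its values in the order consumed by
 * the switch statement below (a sketch, not taken from the bindings):
 * direction, number of descriptors, number of queues, coalesce count,
 * poll timer frequency.
 */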
1681static int probe_channel_properties(struct platform_device *platform_dev,
1682                                    struct xlnx_pcie_dma_device *xdev,
1683                                    u16 channel_number)
1684{
1685        int i;
1686        char propertyname[CHANNEL_PROPERTY_LENGTH];
1687        int numvals, ret;
1688        u32 *val;
1689        struct ps_pcie_dma_chan *channel;
1690        struct ps_pcie_dma_channel_match *xlnx_match;
1691
1692        snprintf(propertyname, CHANNEL_PROPERTY_LENGTH,
1693                 "ps_pcie_channel%d", channel_number);
1694
1695        channel = &xdev->channels[channel_number];
1696
1697        spin_lock_init(&channel->channel_lock);
1698        spin_lock_init(&channel->cookie_lock);
1699
1700        INIT_LIST_HEAD(&channel->pending_list);
1701        spin_lock_init(&channel->pending_list_lock);
1702
1703        INIT_LIST_HEAD(&channel->active_list);
1704        spin_lock_init(&channel->active_list_lock);
1705
1706        spin_lock_init(&channel->src_desc_lock);
1707        spin_lock_init(&channel->dst_desc_lock);
1708
1709        INIT_LIST_HEAD(&channel->pending_interrupts_list);
1710        spin_lock_init(&channel->pending_interrupts_lock);
1711
1712        INIT_LIST_HEAD(&channel->active_interrupts_list);
1713        spin_lock_init(&channel->active_interrupts_lock);
1714
1715        init_completion(&channel->srcq_work_complete);
1716        init_completion(&channel->dstq_work_complete);
1717        init_completion(&channel->chan_shutdown_complt);
1718        init_completion(&channel->chan_terminate_complete);
1719
1720        if (device_property_present(&platform_dev->dev, propertyname)) {
1721                numvals = device_property_read_u32_array(&platform_dev->dev,
1722                                                         propertyname, NULL, 0);
1723
1724                if (numvals < 0)
1725                        return numvals;
1726
1727                val = devm_kcalloc(&platform_dev->dev, numvals, sizeof(u32),
1728                                   GFP_KERNEL);
1729
1730                if (!val)
1731                        return -ENOMEM;
1732
1733                ret = device_property_read_u32_array(&platform_dev->dev,
1734                                                     propertyname, val,
1735                                                     numvals);
1736                if (ret < 0) {
1737                        dev_err(&platform_dev->dev,
1738                                "Unable to read property %s\n", propertyname);
1739                        return ret;
1740                }
1741
1742                for (i = 0; i < numvals; i++) {
1743                        switch (i) {
1744                        case DMA_CHANNEL_DIRECTION:
1745                                channel->direction =
1746                                        (val[DMA_CHANNEL_DIRECTION] ==
1747                                                PCIE_AXI_DIRECTION) ?
1748                                                DMA_TO_DEVICE : DMA_FROM_DEVICE;
1749                                break;
1750                        case NUM_DESCRIPTORS:
1751                                channel->total_descriptors =
1752                                                val[NUM_DESCRIPTORS];
1753                                if (channel->total_descriptors >
1754                                        MAX_DESCRIPTORS) {
1755                                        dev_info(&platform_dev->dev,
1756                                                 "Descriptors > allowed max\n");
1757                                        channel->total_descriptors =
1758                                                        MAX_DESCRIPTORS;
1759                                }
1760                                break;
1761                        case NUM_QUEUES:
1762                                channel->num_queues = val[NUM_QUEUES];
1763                                switch (channel->num_queues) {
1764                                case DEFAULT_DMA_QUEUES:
1765                                        break;
1766                                case TWO_DMA_QUEUES:
1767                                        break;
1768                                default:
1769                                        dev_info(&platform_dev->dev,
1770                                                 "Incorrect Q number for dma chan\n");
1771                                        channel->num_queues = DEFAULT_DMA_QUEUES;
1772                                }
1773                                break;
1774                        case COALESE_COUNT:
1775                                channel->coalesce_count = val[COALESE_COUNT];
1776
1777                                if (channel->coalesce_count >
1778                                        MAX_COALESCE_COUNT) {
1779                                        dev_info(&platform_dev->dev,
1780                                                 "Invalid coalesce Count\n");
1781                                        channel->coalesce_count =
1782                                                MAX_COALESCE_COUNT;
1783                                }
1784                                break;
1785                        case POLL_TIMER_FREQUENCY:
1786                                channel->poll_timer_freq =
1787                                        val[POLL_TIMER_FREQUENCY];
1788                                break;
1789                        default:
1790                                dev_err(&platform_dev->dev,
1791                                        "Check order of channel properties!\n");
1792                        }
1793                }
1794        } else {
1795                dev_err(&platform_dev->dev,
1796                        "Property %s not present. Invalid configuration!\n",
1797                                propertyname);
1798                return -ENOTSUPP;
1799        }
1800
1801        if (channel->direction == DMA_TO_DEVICE) {
1802                if (channel->num_queues == DEFAULT_DMA_QUEUES) {
1803                        channel->srcq_buffer_location = BUFFER_LOC_PCI;
1804                        channel->dstq_buffer_location = BUFFER_LOC_AXI;
1805                } else {
1806                        channel->srcq_buffer_location = BUFFER_LOC_PCI;
1807                        channel->dstq_buffer_location = BUFFER_LOC_INVALID;
1808                }
1809        } else {
1810                if (channel->num_queues == DEFAULT_DMA_QUEUES) {
1811                        channel->srcq_buffer_location = BUFFER_LOC_AXI;
1812                        channel->dstq_buffer_location = BUFFER_LOC_PCI;
1813                } else {
1814                        channel->srcq_buffer_location = BUFFER_LOC_INVALID;
1815                        channel->dstq_buffer_location = BUFFER_LOC_PCI;
1816                }
1817        }
1818
1819        channel->xdev = xdev;
1820        channel->channel_number = channel_number;
1821
1822        if (xdev->is_rootdma) {
1823                channel->dev = xdev->dev;
1824                channel->intr_status_offset = DMA_AXI_INTR_STATUS_REG_OFFSET;
1825                channel->intr_control_offset = DMA_AXI_INTR_CNTRL_REG_OFFSET;
1826        } else {
1827                channel->dev = &xdev->pci_dev->dev;
1828                channel->intr_status_offset = DMA_PCIE_INTR_STATUS_REG_OFFSET;
1829                channel->intr_control_offset = DMA_PCIE_INTR_CNTRL_REG_OFFSET;
1830        }
1831
1832        channel->chan_base =
1833                (struct DMA_ENGINE_REGISTERS *)((__force char *)xdev->reg_base +
1834                                (channel_number * DMA_CHANNEL_REGS_SIZE));
1835
1836        if (((channel->chan_base->dma_channel_status) &
1837                                DMA_STATUS_DMA_PRES_BIT) == 0) {
1838                dev_err(&platform_dev->dev,
1839                        "Hardware reports channel not present\n");
1840                return -ENOTSUPP;
1841        }
1842
1843        update_channel_read_attribute(channel);
1844        update_channel_write_attribute(channel);
1845
1846        xlnx_match = devm_kzalloc(&platform_dev->dev,
1847                                  sizeof(struct ps_pcie_dma_channel_match),
1848                                  GFP_KERNEL);
1849
1850        if (!xlnx_match)
1851                return -ENOMEM;
1852
1853        if (xdev->is_rootdma) {
1854                xlnx_match->pci_vendorid = xdev->rootdma_vendor;
1855                xlnx_match->pci_deviceid = xdev->rootdma_device;
1856        } else {
1857                xlnx_match->pci_vendorid = xdev->pci_dev->vendor;
1858                xlnx_match->pci_deviceid = xdev->pci_dev->device;
1859                xlnx_match->bar_params = xdev->bar_info;
1860        }
1861
1862        xlnx_match->board_number = xdev->board_number;
1863        xlnx_match->channel_number = channel_number;
1864        xlnx_match->direction = xdev->channels[channel_number].direction;
1865
1866        channel->common.private = (void *)xlnx_match;
1867
1868        channel->common.device = &xdev->common;
1869        list_add_tail(&channel->common.device_node, &xdev->common.channels);
1870
1871        return 0;
1872}
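
/*
 * Client-usage sketch (illustrative, not part of this driver): a DMA client
 * is assumed to locate one of these channels with dma_request_channel() and
 * a filter that inspects the ps_pcie_dma_channel_match published above
 * through dma_chan->private, for example:
 *
 *	static bool ps_pcie_dma_filter(struct dma_chan *chan, void *param)
 *	{
 *		struct ps_pcie_dma_channel_match *m = chan->private;
 *		struct ps_pcie_dma_channel_match *wanted = param;
 *
 *		return m && m->pci_vendorid == wanted->pci_vendorid &&
 *		       m->pci_deviceid == wanted->pci_deviceid &&
 *		       m->channel_number == wanted->channel_number &&
 *		       m->direction == wanted->direction;
 *	}
 */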
1873
1874static void xlnx_ps_pcie_destroy_mempool(struct ps_pcie_dma_chan *chan)
1875{
1876        mempool_destroy(chan->transactions_pool);
1877
1878        mempool_destroy(chan->intr_transactions_pool);
1879}
1880
1881static void xlnx_ps_pcie_free_worker_queues(struct ps_pcie_dma_chan *chan)
1882{
1883        if (chan->maintenance_workq)
1884                destroy_workqueue(chan->maintenance_workq);
1885
1886        if (chan->sw_intrs_wrkq)
1887                destroy_workqueue(chan->sw_intrs_wrkq);
1888
1889        if (chan->srcq_desc_cleanup)
1890                destroy_workqueue(chan->srcq_desc_cleanup);
1891
1892        if (chan->dstq_desc_cleanup)
1893                destroy_workqueue(chan->dstq_desc_cleanup);
1894
1895        if (chan->chan_programming)
1896                destroy_workqueue(chan->chan_programming);
1897
1898        if (chan->primary_desc_cleanup)
1899                destroy_workqueue(chan->primary_desc_cleanup);
1900}
1901
1902static void xlnx_ps_pcie_free_pkt_ctxts(struct ps_pcie_dma_chan *chan)
1903{
1904        kfree(chan->ppkt_ctx_srcq);
1905
1906        kfree(chan->ppkt_ctx_dstq);
1907}
1908
1909static void xlnx_ps_pcie_free_descriptors(struct ps_pcie_dma_chan *chan)
1910{
1911        ssize_t size;
1912
1913        if (chan->psrc_sgl_bd) {
1914                size = chan->total_descriptors *
1915                        sizeof(struct SOURCE_DMA_DESCRIPTOR);
1916                dma_free_coherent(chan->dev, size, chan->psrc_sgl_bd,
1917                                  chan->src_sgl_bd_pa);
1918        }
1919
1920        if (chan->pdst_sgl_bd) {
1921                size = chan->total_descriptors *
1922                        sizeof(struct DEST_DMA_DESCRIPTOR);
1923                dma_free_coherent(chan->dev, size, chan->pdst_sgl_bd,
1924                                  chan->dst_sgl_bd_pa);
1925        }
1926
1927        if (chan->psrc_sta_bd) {
1928                size = chan->total_descriptors *
1929                        sizeof(struct STATUS_DMA_DESCRIPTOR);
1930                dma_free_coherent(chan->dev, size, chan->psrc_sta_bd,
1931                                  chan->src_sta_bd_pa);
1932        }
1933
1934        if (chan->pdst_sta_bd) {
1935                size = chan->total_descriptors *
1936                        sizeof(struct STATUS_DMA_DESCRIPTOR);
1937                dma_free_coherent(chan->dev, size, chan->pdst_sta_bd,
1938                                  chan->dst_sta_bd_pa);
1939        }
1940}
1941
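/*
 * xlnx_ps_pcie_channel_activate - Programs the interrupt coalesce count,
 * enables the channel interrupts (enable, DMA error, scatter gather) and the
 * DMA engine itself, marks the channel CHANNEL_AVAILABLE and arms the poll
 * timer when interrupt coalescing is in use.
 */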
1942static int xlnx_ps_pcie_channel_activate(struct ps_pcie_dma_chan *chan)
1943{
1944        u32 reg = chan->coalesce_count;
1945
1946        reg = reg << DMA_INTCNTRL_SGCOLSCCNT_BIT_SHIFT;
1947
1948        /* Enable Interrupts for channel */
1949        ps_pcie_dma_set_mask(chan, chan->intr_control_offset,
1950                             reg | DMA_INTCNTRL_ENABLINTR_BIT |
1951                             DMA_INTCNTRL_DMAERRINTR_BIT |
1952                             DMA_INTCNTRL_DMASGINTR_BIT);
1953
1954        /* Enable DMA */
1955        ps_pcie_dma_set_mask(chan, DMA_CNTRL_REG_OFFSET,
1956                             DMA_CNTRL_ENABL_BIT |
1957                             DMA_CNTRL_64BIT_STAQ_ELEMSZ_BIT);
1958
1959        spin_lock(&chan->channel_lock);
1960        chan->state = CHANNEL_AVAILABLE;
1961        spin_unlock(&chan->channel_lock);
1962
1963        /* Activate timer if required */
1964        if ((chan->coalesce_count > 0) && !chan->poll_timer.function)
1965                xlnx_ps_pcie_alloc_poll_timer(chan);
1966
1967        return 0;
1968}
1969
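/*
 * xlnx_ps_pcie_channel_quiesce - Disables channel interrupts, removes the
 * poll timer if one was created, flushes the cleanup and programming work
 * queues, clears the latched interrupt status bits, disables the DMA engine
 * and marks the channel unavailable.
 */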
1970static void xlnx_ps_pcie_channel_quiesce(struct ps_pcie_dma_chan *chan)
1971{
1972        /* Disable interrupts for Channel */
1973        ps_pcie_dma_clr_mask(chan, chan->intr_control_offset,
1974                             DMA_INTCNTRL_ENABLINTR_BIT);
1975
1976        /* Delete timer if it is created */
1977        if ((chan->coalesce_count > 0) && chan->poll_timer.function)
1978                xlnx_ps_pcie_free_poll_timer(chan);
1979
1980        /* Flush descriptor cleaning work queues */
1981        if (chan->primary_desc_cleanup)
1982                flush_workqueue(chan->primary_desc_cleanup);
1983
1984        /* Flush channel programming work queue */
1985        if (chan->chan_programming)
1986                flush_workqueue(chan->chan_programming);
1987
1988        /*  Clear the persistent bits */
1989        ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
1990                             DMA_INTSTATUS_DMAERR_BIT |
1991                             DMA_INTSTATUS_SGLINTR_BIT |
1992                             DMA_INTSTATUS_SWINTR_BIT);
1993
1994        /* Disable DMA channel */
1995        ps_pcie_dma_clr_mask(chan, DMA_CNTRL_REG_OFFSET, DMA_CNTRL_ENABL_BIT);
1996
1997        spin_lock(&chan->channel_lock);
1998        chan->state = CHANNEL_UNAVIALBLE;
1999        spin_unlock(&chan->channel_lock);
2000}
2001
2002static u32 total_bytes_in_sgl(struct scatterlist *sgl,
2003                              unsigned int num_entries)
2004{
2005        u32 total_bytes = 0;
2006        struct scatterlist *sgl_ptr;
2007        unsigned int i;
2008
2009        for_each_sg(sgl, sgl_ptr, num_entries, i)
2010                total_bytes += sg_dma_len(sgl_ptr);
2011
2012        return total_bytes;
2013}
2014
2015static void ivk_cbk_intr_seg(struct ps_pcie_intr_segment *intr_seg,
2016                             struct ps_pcie_dma_chan *chan,
2017                             enum dmaengine_tx_result result)
2018{
2019        struct dmaengine_result rslt;
2020
2021        rslt.result = result;
2022        rslt.residue = 0;
2023
2024        spin_lock(&chan->cookie_lock);
2025        dma_cookie_complete(&intr_seg->async_intr_tx);
2026        spin_unlock(&chan->cookie_lock);
2027
2028        dmaengine_desc_get_callback_invoke(&intr_seg->async_intr_tx, &rslt);
2029}
2030
2031static void ivk_cbk_seg(struct ps_pcie_tx_segment *seg,
2032                        struct ps_pcie_dma_chan *chan,
2033                        enum dmaengine_tx_result result)
2034{
2035        struct dmaengine_result rslt, *prslt;
2036
2037        spin_lock(&chan->cookie_lock);
2038        dma_cookie_complete(&seg->async_tx);
2039        spin_unlock(&chan->cookie_lock);
2040
2041        rslt.result = result;
2042        if (seg->tx_elements.src_sgl &&
2043            chan->srcq_buffer_location == BUFFER_LOC_PCI) {
2044                rslt.residue =
2045                        total_bytes_in_sgl(seg->tx_elements.src_sgl,
2046                                           seg->tx_elements.srcq_num_elemets);
2047                prslt = &rslt;
2048        } else if (seg->tx_elements.dst_sgl &&
2049                   chan->dstq_buffer_location == BUFFER_LOC_PCI) {
2050                rslt.residue =
2051                        total_bytes_in_sgl(seg->tx_elements.dst_sgl,
2052                                           seg->tx_elements.dstq_num_elemets);
2053                prslt = &rslt;
2054        } else {
2055                prslt = NULL;
2056        }
2057
2058        dmaengine_desc_get_callback_invoke(&seg->async_tx, prslt);
2059}
2060
2061static void ivk_cbk_ctx(struct PACKET_TRANSFER_PARAMS *ppkt_ctxt,
2062                        struct ps_pcie_dma_chan *chan,
2063                        enum dmaengine_tx_result result)
2064{
2065        if (ppkt_ctxt->availability_status == IN_USE) {
2066                if (ppkt_ctxt->seg) {
2067                        ivk_cbk_seg(ppkt_ctxt->seg, chan, result);
2068                        mempool_free(ppkt_ctxt->seg,
2069                                     chan->transactions_pool);
2070                }
2071        }
2072}
2073
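/*
 * ivk_cbk_for_pending - Fails every outstanding transaction on the channel:
 * completes the in-flight source and destination packet contexts as
 * read/write failures, then aborts everything still on the active and
 * pending segment lists and on the interrupt lists, returning each segment
 * to its mempool.
 */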
2074static void ivk_cbk_for_pending(struct ps_pcie_dma_chan *chan)
2075{
2076        int i;
2077        struct PACKET_TRANSFER_PARAMS *ppkt_ctxt;
2078        struct ps_pcie_tx_segment *seg, *seg_nxt;
2079        struct ps_pcie_intr_segment *intr_seg, *intr_seg_next;
2080
2081        if (chan->ppkt_ctx_srcq) {
2082                if (chan->idx_ctx_srcq_tail != chan->idx_ctx_srcq_head) {
2083                        i = chan->idx_ctx_srcq_tail;
2084                        while (i != chan->idx_ctx_srcq_head) {
2085                                ppkt_ctxt = chan->ppkt_ctx_srcq + i;
2086                                ivk_cbk_ctx(ppkt_ctxt, chan,
2087                                            DMA_TRANS_READ_FAILED);
2088                                memset(ppkt_ctxt, 0,
2089                                       sizeof(struct PACKET_TRANSFER_PARAMS));
2090                                i++;
2091                                if (i == chan->total_descriptors)
2092                                        i = 0;
2093                        }
2094                }
2095        }
2096
2097        if (chan->ppkt_ctx_dstq) {
2098                if (chan->idx_ctx_dstq_tail != chan->idx_ctx_dstq_head) {
2099                        i = chan->idx_ctx_dstq_tail;
2100                        while (i != chan->idx_ctx_dstq_head) {
2101                                ppkt_ctxt = chan->ppkt_ctx_dstq + i;
2102                                ivk_cbk_ctx(ppkt_ctxt, chan,
2103                                            DMA_TRANS_WRITE_FAILED);
2104                                memset(ppkt_ctxt, 0,
2105                                       sizeof(struct PACKET_TRANSFER_PARAMS));
2106                                i++;
2107                                if (i == chan->total_descriptors)
2108                                        i = 0;
2109                        }
2110                }
2111        }
2112
2113        list_for_each_entry_safe(seg, seg_nxt, &chan->active_list, node) {
2114                ivk_cbk_seg(seg, chan, DMA_TRANS_ABORTED);
2115                spin_lock(&chan->active_list_lock);
2116                list_del(&seg->node);
2117                spin_unlock(&chan->active_list_lock);
2118                mempool_free(seg, chan->transactions_pool);
2119        }
2120
2121        list_for_each_entry_safe(seg, seg_nxt, &chan->pending_list, node) {
2122                ivk_cbk_seg(seg, chan, DMA_TRANS_ABORTED);
2123                spin_lock(&chan->pending_list_lock);
2124                list_del(&seg->node);
2125                spin_unlock(&chan->pending_list_lock);
2126                mempool_free(seg, chan->transactions_pool);
2127        }
2128
2129        list_for_each_entry_safe(intr_seg, intr_seg_next,
2130                                 &chan->active_interrupts_list, node) {
2131                ivk_cbk_intr_seg(intr_seg, chan, DMA_TRANS_ABORTED);
2132                spin_lock(&chan->active_interrupts_lock);
2133                list_del(&intr_seg->node);
2134                spin_unlock(&chan->active_interrupts_lock);
2135                mempool_free(intr_seg, chan->intr_transactions_pool);
2136        }
2137
2138        list_for_each_entry_safe(intr_seg, intr_seg_next,
2139                                 &chan->pending_interrupts_list, node) {
2140                ivk_cbk_intr_seg(intr_seg, chan, DMA_TRANS_ABORTED);
2141                spin_lock(&chan->pending_interrupts_lock);
2142                list_del(&intr_seg->node);
2143                spin_unlock(&chan->pending_interrupts_lock);
2144                mempool_free(intr_seg, chan->intr_transactions_pool);
2145        }
2146}
2147
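/*
 * xlnx_ps_pcie_reset_channel - Quiesces the channel, completes all
 * outstanding transactions with an error result, resets the DMA engine,
 * re-initializes the software and hardware context and reactivates the
 * channel.
 */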
2148static void xlnx_ps_pcie_reset_channel(struct ps_pcie_dma_chan *chan)
2149{
2150        xlnx_ps_pcie_channel_quiesce(chan);
2151
2152        ivk_cbk_for_pending(chan);
2153
2154        ps_pcie_chan_reset(chan);
2155
2156        init_sw_components(chan);
2157        init_hw_components(chan);
2158
2159        xlnx_ps_pcie_channel_activate(chan);
2160}
2161
2162static void xlnx_ps_pcie_free_poll_timer(struct ps_pcie_dma_chan *chan)
2163{
2164        if (chan->poll_timer.function) {
2165                del_timer_sync(&chan->poll_timer);
2166                chan->poll_timer.function = NULL;
2167        }
2168}
2169
2170static int xlnx_ps_pcie_alloc_poll_timer(struct ps_pcie_dma_chan *chan)
2171{
2172        init_timer(&chan->poll_timer);
2173        chan->poll_timer.function = poll_completed_transactions;
2174        chan->poll_timer.expires = jiffies + chan->poll_timer_freq;
2175        chan->poll_timer.data = (unsigned long)chan;
2176
2177        add_timer(&chan->poll_timer);
2178
2179        return 0;
2180}
2181
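/*
 * terminate_transactions_work - Work handler used when terminating a
 * channel's transactions: quiesces the channel, aborts all outstanding
 * transactions, reactivates the channel and signals
 * chan_terminate_complete.
 */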
2182static void terminate_transactions_work(struct work_struct *work)
2183{
2184        struct ps_pcie_dma_chan *chan =
2185                (struct ps_pcie_dma_chan *)container_of(work,
2186                        struct ps_pcie_dma_chan, handle_chan_terminate);
2187
2188        xlnx_ps_pcie_channel_quiesce(chan);
2189        ivk_cbk_for_pending(chan);
2190        xlnx_ps_pcie_channel_activate(chan);
2191
2192        complete(&chan->chan_terminate_complete);
2193}
2194
2195static void chan_shutdown_work(struct work_struct *work)
2196{
2197        struct ps_pcie_dma_chan *chan =
2198                (struct ps_pcie_dma_chan *)container_of(work,
2199                                struct ps_pcie_dma_chan, handle_chan_shutdown);
2200
2201        xlnx_ps_pcie_channel_quiesce(chan);
2202
2203        complete(&chan->chan_shutdown_complt);
2204}
2205
2206static void chan_reset_work(struct work_struct *work)
2207{
2208        struct ps_pcie_dma_chan *chan =
2209                (struct ps_pcie_dma_chan *)container_of(work,
2210                                struct ps_pcie_dma_chan, handle_chan_reset);
2211
2212        xlnx_ps_pcie_reset_channel(chan);
2213}
2214
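/*
 * sw_intr_work - Completes every descriptor on the active software
 * interrupts list and invokes its callback.
 */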
2215static void sw_intr_work(struct work_struct *work)
2216{
2217        struct ps_pcie_dma_chan *chan =
2218                (struct ps_pcie_dma_chan *)container_of(work,
2219                                struct ps_pcie_dma_chan, handle_sw_intrs);
2220        struct ps_pcie_intr_segment *intr_seg, *intr_seg_next;
2221
2222        list_for_each_entry_safe(intr_seg, intr_seg_next,
2223                                 &chan->active_interrupts_list, node) {
2224                spin_lock(&chan->cookie_lock);
2225                dma_cookie_complete(&intr_seg->async_intr_tx);
2226                spin_unlock(&chan->cookie_lock);
2227                dmaengine_desc_get_callback_invoke(&intr_seg->async_intr_tx,
2228                                                   NULL);
2229                spin_lock(&chan->active_interrupts_lock);
2230                list_del(&intr_seg->node);
2231                spin_unlock(&chan->active_interrupts_lock);
2232        }
2233}
2234
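/*
 * xlnx_ps_pcie_alloc_worker_threads - Creates the per-channel single threaded
 * workqueues (descriptor programming, primary cleanup, maintenance, software
 * interrupts, and source/destination status queue cleanup when the matching
 * BD rings exist) and unwinds any workqueues already created on failure.
 */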
2235static int xlnx_ps_pcie_alloc_worker_threads(struct ps_pcie_dma_chan *chan)
2236{
2237        char wq_name[WORKQ_NAME_SIZE];
2238
2239        snprintf(wq_name, WORKQ_NAME_SIZE,
2240                 "PS PCIe channel %d descriptor programming wq",
2241                 chan->channel_number);
2242        chan->chan_programming =
2243                create_singlethread_workqueue((const char *)wq_name);
2244        if (!chan->chan_programming) {
2245                dev_err(chan->dev,
2246                        "Unable to create programming wq for chan %d",
2247                        chan->channel_number);
2248                goto err_no_desc_program_wq;
2249        } else {
2250                INIT_WORK(&chan->handle_chan_programming,
2251                          ps_pcie_chan_program_work);
2252        }
2253        memset(wq_name, 0, WORKQ_NAME_SIZE);
2254
2255        snprintf(wq_name, WORKQ_NAME_SIZE,
2256                 "PS PCIe channel %d primary cleanup wq", chan->channel_number);
2257        chan->primary_desc_cleanup =
2258                create_singlethread_workqueue((const char *)wq_name);
2259        if (!chan->primary_desc_cleanup) {
2260                dev_err(chan->dev,
2261                        "Unable to create primary cleanup wq for channel %d",
2262                        chan->channel_number);
2263                goto err_no_primary_clean_wq;
2264        } else {
2265                INIT_WORK(&chan->handle_primary_desc_cleanup,
2266                          ps_pcie_chan_primary_work);
2267        }
2268        memset(wq_name, 0, WORKQ_NAME_SIZE);
2269
2270        snprintf(wq_name, WORKQ_NAME_SIZE,
2271                 "PS PCIe channel %d maintenance works wq",
2272                 chan->channel_number);
2273        chan->maintenance_workq =
2274                create_singlethread_workqueue((const char *)wq_name);
2275        if (!chan->maintenance_workq) {
2276                dev_err(chan->dev,
2277                        "Unable to create maintenance wq for channel %d",
2278                        chan->channel_number);
2279                goto err_no_maintenance_wq;
2280        } else {
2281                INIT_WORK(&chan->handle_chan_reset, chan_reset_work);
2282                INIT_WORK(&chan->handle_chan_shutdown, chan_shutdown_work);
2283                INIT_WORK(&chan->handle_chan_terminate,
2284                          terminate_transactions_work);
2285        }
2286        memset(wq_name, 0, WORKQ_NAME_SIZE);
2287
2288        snprintf(wq_name, WORKQ_NAME_SIZE,
2289                 "PS PCIe channel %d software Interrupts wq",
2290                 chan->channel_number);
2291        chan->sw_intrs_wrkq =
2292                create_singlethread_workqueue((const char *)wq_name);
2293        if (!chan->sw_intrs_wrkq) {
2294                dev_err(chan->dev,
2295                        "Unable to create sw interrupts wq for channel %d",
2296                        chan->channel_number);
2297                goto err_no_sw_intrs_wq;
2298        } else {
2299                INIT_WORK(&chan->handle_sw_intrs, sw_intr_work);
2300        }
2301        memset(wq_name, 0, WORKQ_NAME_SIZE);
2302
2303        if (chan->psrc_sgl_bd) {
2304                snprintf(wq_name, WORKQ_NAME_SIZE,
2305                         "PS PCIe channel %d srcq handling wq",
2306                         chan->channel_number);
2307                chan->srcq_desc_cleanup =
2308                        create_singlethread_workqueue((const char *)wq_name);
2309                if (!chan->srcq_desc_cleanup) {
2310                        dev_err(chan->dev,
2311                                "Unable to create src q completion wq chan %d",
2312                                chan->channel_number);
2313                        goto err_no_src_q_completion_wq;
2314                } else {
2315                        INIT_WORK(&chan->handle_srcq_desc_cleanup,
2316                                  src_cleanup_work);
2317                }
2318                memset(wq_name, 0, WORKQ_NAME_SIZE);
2319        }
2320
2321        if (chan->pdst_sgl_bd) {
2322                snprintf(wq_name, WORKQ_NAME_SIZE,
2323                         "PS PCIe channel %d dstq handling wq",
2324                         chan->channel_number);
2325                chan->dstq_desc_cleanup =
2326                        create_singlethread_workqueue((const char *)wq_name);
2327                if (!chan->dstq_desc_cleanup) {
2328                        dev_err(chan->dev,
2329                                "Unable to create dst q completion wq chan %d",
2330                                chan->channel_number);
2331                        goto err_no_dst_q_completion_wq;
2332                } else {
2333                        INIT_WORK(&chan->handle_dstq_desc_cleanup,
2334                                  dst_cleanup_work);
2335                }
2336                memset(wq_name, 0, WORKQ_NAME_SIZE);
2337        }
2338
2339        return 0;
2340err_no_dst_q_completion_wq:
2341        if (chan->srcq_desc_cleanup)
2342                destroy_workqueue(chan->srcq_desc_cleanup);
2343err_no_src_q_completion_wq:
2344        if (chan->sw_intrs_wrkq)
2345                destroy_workqueue(chan->sw_intrs_wrkq);
2346err_no_sw_intrs_wq:
2347        if (chan->maintenance_workq)
2348                destroy_workqueue(chan->maintenance_workq);
2349err_no_maintenance_wq:
2350        if (chan->primary_desc_cleanup)
2351                destroy_workqueue(chan->primary_desc_cleanup);
2352err_no_primary_clean_wq:
2353        if (chan->chan_programming)
2354                destroy_workqueue(chan->chan_programming);
2355err_no_desc_program_wq:
2356        return -ENOMEM;
2357}
2358
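/*
 * xlnx_ps_pcie_alloc_mempool - Creates kmalloc backed mempools for the
 * transaction segments (total_descriptors elements) and for the software
 * interrupt segments (MIN_SW_INTR_TRANSACTIONS elements).
 */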
2359static int xlnx_ps_pcie_alloc_mempool(struct ps_pcie_dma_chan *chan)
2360{
2361        chan->transactions_pool =
2362                mempool_create_kmalloc_pool(chan->total_descriptors,
2363                                            sizeof(struct ps_pcie_tx_segment));
2364
2365        if (!chan->transactions_pool)
2366                goto no_transactions_pool;
2367
2368        chan->intr_transactions_pool =
2369                mempool_create_kmalloc_pool(MIN_SW_INTR_TRANSACTIONS,
2370                                            sizeof(struct ps_pcie_intr_segment));
2371
2372        if (!chan->intr_transactions_pool)
2373                goto no_intr_transactions_pool;
2374
2375        return 0;
2376
2377no_intr_transactions_pool:
2378        mempool_destroy(chan->transactions_pool);
2379
2380no_transactions_pool:
2381        return -ENOMEM;
2382}
2383
2384static int xlnx_ps_pcie_alloc_pkt_contexts(struct ps_pcie_dma_chan *chan)
2385{
2386        if (chan->psrc_sgl_bd) {
2387                chan->ppkt_ctx_srcq =
2388                        kcalloc(chan->total_descriptors,
2389                                sizeof(struct PACKET_TRANSFER_PARAMS),
2390                                GFP_KERNEL);
2391                if (!chan->ppkt_ctx_srcq) {
2392                        dev_err(chan->dev,
2393                                "Src pkt cxt allocation for chan %d failed\n",
2394                                chan->channel_number);
2395                        goto err_no_src_pkt_ctx;
2396                }
2397        }
2398
2399        if (chan->pdst_sgl_bd) {
2400                chan->ppkt_ctx_dstq =
2401                        kcalloc(chan->total_descriptors,
2402                                sizeof(struct PACKET_TRANSFER_PARAMS),
2403                                GFP_KERNEL);
2404                if (!chan->ppkt_ctx_dstq) {
2405                        dev_err(chan->dev,
2406                                "Dst pkt cxt for chan %d failed\n",
2407                                chan->channel_number);
2408                        goto err_no_dst_pkt_ctx;
2409                }
2410        }
2411
2412        return 0;
2413
2414err_no_dst_pkt_ctx:
2415        kfree(chan->ppkt_ctx_srcq);
2416
2417err_no_src_pkt_ctx:
2418        return -ENOMEM;
2419}
2420
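/*
 * dma_alloc_descriptors_two_queues - For TWO_DMA_QUEUES channels, allocates
 * one SGL BD ring and one status BD ring and attaches them to either the
 * source or the destination side of the channel depending on the transfer
 * direction.
 */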
2421static int dma_alloc_descriptors_two_queues(struct ps_pcie_dma_chan *chan)
2422{
2423        size_t size;
2424
2425        void *sgl_base;
2426        void *sta_base;
2427        dma_addr_t phy_addr_sglbase;
2428        dma_addr_t phy_addr_stabase;
2429
2430        size = chan->total_descriptors *
2431                sizeof(struct SOURCE_DMA_DESCRIPTOR);
2432
2433        sgl_base = dma_zalloc_coherent(chan->dev, size, &phy_addr_sglbase,
2434                                       GFP_KERNEL);
2435
2436        if (!sgl_base) {
2437                dev_err(chan->dev,
2438                        "Sgl bds in two channel mode for chan %d failed\n",
2439                        chan->channel_number);
2440                goto err_no_sgl_bds;
2441        }
2442
2443        size = chan->total_descriptors * sizeof(struct STATUS_DMA_DESCRIPTOR);
2444        sta_base = dma_zalloc_coherent(chan->dev, size, &phy_addr_stabase,
2445                                       GFP_KERNEL);
2446
2447        if (!sta_base) {
2448                dev_err(chan->dev,
2449                        "Sta bds in two channel mode for chan %d failed\n",
2450                        chan->channel_number);
2451                goto err_no_sta_bds;
2452        }
2453
2454        if (chan->direction == DMA_TO_DEVICE) {
2455                chan->psrc_sgl_bd = sgl_base;
2456                chan->src_sgl_bd_pa = phy_addr_sglbase;
2457
2458                chan->psrc_sta_bd = sta_base;
2459                chan->src_sta_bd_pa = phy_addr_stabase;
2460
2461                chan->pdst_sgl_bd = NULL;
2462                chan->dst_sgl_bd_pa = 0;
2463
2464                chan->pdst_sta_bd = NULL;
2465                chan->dst_sta_bd_pa = 0;
2466
2467        } else if (chan->direction == DMA_FROM_DEVICE) {
2468                chan->psrc_sgl_bd = NULL;
2469                chan->src_sgl_bd_pa = 0;
2470
2471                chan->psrc_sta_bd = NULL;
2472                chan->src_sta_bd_pa = 0;
2473
2474                chan->pdst_sgl_bd = sgl_base;
2475                chan->dst_sgl_bd_pa = phy_addr_sglbase;
2476
2477                chan->pdst_sta_bd = sta_base;
2478                chan->dst_sta_bd_pa = phy_addr_stabase;
2479
2480        } else {
2481                dev_err(chan->dev,
2482                        "%d %s() Unsupported channel direction\n",
2483                        __LINE__, __func__);
2484                goto unsupported_channel_direction;
2485        }
2486
2487        return 0;
2488
2489unsupported_channel_direction:
2490        size = chan->total_descriptors *
2491                sizeof(struct STATUS_DMA_DESCRIPTOR);
2492        dma_free_coherent(chan->dev, size, sta_base, phy_addr_stabase);
2493err_no_sta_bds:
2494        size = chan->total_descriptors *
2495                sizeof(struct SOURCE_DMA_DESCRIPTOR);
2496        dma_free_coherent(chan->dev, size, sgl_base, phy_addr_sglbase);
2497err_no_sgl_bds:
2498
2499        return -ENOMEM;
2500}
2501
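/*
 * dma_alloc_decriptors_all_queues - For DEFAULT_DMA_QUEUES channels,
 * allocates all four BD rings: source SGL, destination SGL, source status
 * and destination status.
 */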
2502static int dma_alloc_decriptors_all_queues(struct ps_pcie_dma_chan *chan)
2503{
2504        size_t size;
2505
2506        size = chan->total_descriptors *
2507                sizeof(struct SOURCE_DMA_DESCRIPTOR);
2508        chan->psrc_sgl_bd =
2509                dma_zalloc_coherent(chan->dev, size, &chan->src_sgl_bd_pa,
2510                                    GFP_KERNEL);
2511
2512        if (!chan->psrc_sgl_bd) {
2513                dev_err(chan->dev,
2514                        "Alloc fail src q buffer descriptors for chan %d\n",
2515                        chan->channel_number);
2516                goto err_no_src_sgl_descriptors;
2517        }
2518
2519        size = chan->total_descriptors * sizeof(struct DEST_DMA_DESCRIPTOR);
2520        chan->pdst_sgl_bd =
2521                dma_zalloc_coherent(chan->dev, size, &chan->dst_sgl_bd_pa,
2522                                    GFP_KERNEL);
2523
2524        if (!chan->pdst_sgl_bd) {
2525                dev_err(chan->dev,
2526                        "Alloc fail dst q buffer descriptors for chan %d\n",
2527                        chan->channel_number);
2528                goto err_no_dst_sgl_descriptors;
2529        }
2530
2531        size = chan->total_descriptors * sizeof(struct STATUS_DMA_DESCRIPTOR);
2532        chan->psrc_sta_bd =
2533                dma_zalloc_coherent(chan->dev, size, &chan->src_sta_bd_pa,
2534                                    GFP_KERNEL);
2535
2536        if (!chan->psrc_sta_bd) {
2537                dev_err(chan->dev,
2538                        "Unable to allocate src q status bds for chan %d\n",
2539                        chan->channel_number);
2540                goto err_no_src_sta_descriptors;
2541        }
2542
2543        chan->pdst_sta_bd =
2544                dma_zalloc_coherent(chan->dev, size, &chan->dst_sta_bd_pa,
2545                                    GFP_KERNEL);
2546
2547        if (!chan->pdst_sta_bd) {
2548                dev_err(chan->dev,
2549                        "Unable to allocate Dst q status bds for chan %d\n",
2550                        chan->channel_number);
2551                goto err_no_dst_sta_descriptors;
2552        }
2553
2554        return 0;
2555
2556err_no_dst_sta_descriptors:
2557        size = chan->total_descriptors *
2558                sizeof(struct STATUS_DMA_DESCRIPTOR);
2559        dma_free_coherent(chan->dev, size, chan->psrc_sta_bd,
2560                          chan->src_sta_bd_pa);
2561err_no_src_sta_descriptors:
2562        size = chan->total_descriptors *
2563                sizeof(struct DEST_DMA_DESCRIPTOR);
2564        dma_free_coherent(chan->dev, size, chan->pdst_sgl_bd,
2565                          chan->dst_sgl_bd_pa);
2566err_no_dst_sgl_descriptors:
2567        size = chan->total_descriptors *
2568                sizeof(struct SOURCE_DMA_DESCRIPTOR);
2569        dma_free_coherent(chan->dev, size, chan->psrc_sgl_bd,
2570                          chan->src_sgl_bd_pa);
2571
2572err_no_src_sgl_descriptors:
2573        return -ENOMEM;
2574}
2575
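/*
 * xlnx_ps_pcie_dma_free_chan_resources - dmaengine callback that shuts the
 * channel down through the maintenance workqueue and then releases the
 * worker queues, packet contexts, mempools and BD rings.
 */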
2576static void xlnx_ps_pcie_dma_free_chan_resources(struct dma_chan *dchan)
2577{
2578        struct ps_pcie_dma_chan *chan;
2579
2580        if (!dchan)
2581                return;
2582
2583        chan = to_xilinx_chan(dchan);
2584
2585        if (chan->state == CHANNEL_RESOURCE_UNALLOCATED)
2586                return;
2587
2588        if (chan->maintenance_workq) {
2589                if (completion_done(&chan->chan_shutdown_complt))
2590                        reinit_completion(&chan->chan_shutdown_complt);
2591                queue_work(chan->maintenance_workq,
2592                           &chan->handle_chan_shutdown);
2593                wait_for_completion_interruptible(&chan->chan_shutdown_complt);
2594
2595                xlnx_ps_pcie_free_worker_queues(chan);
2596                xlnx_ps_pcie_free_pkt_ctxts(chan);
2597                xlnx_ps_pcie_destroy_mempool(chan);
2598                xlnx_ps_pcie_free_descriptors(chan);
2599
2600                spin_lock(&chan->channel_lock);
2601                chan->state = CHANNEL_RESOURCE_UNALLOCATED;
2602                spin_unlock(&chan->channel_lock);
2603        }
2604}
2605
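/**
 * xlnx_ps_pcie_dma_alloc_chan_resources - dmaengine alloc_chan_resources callback
 * @dchan: DMA channel handle
 *
 * Allocates buffer descriptors (for all four queues or two queues, depending
 * on the channel configuration), memory pools, packet contexts and worker
 * threads, then resets the channel and initializes its cookie state.
 *
 * Return: 0 on success, -EINVAL on a NULL channel, -ENOMEM on allocation
 * failure.
 */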
2606static int xlnx_ps_pcie_dma_alloc_chan_resources(struct dma_chan *dchan)
2607{
2608        struct ps_pcie_dma_chan *chan;
2609
2610        if (!dchan)
2611                return -EINVAL;
2612
2613        chan = to_xilinx_chan(dchan);
2614
2615        if (chan->state != CHANNEL_RESOURCE_UNALLOCATED)
2616                return 0;
2617
2618        if (chan->num_queues == DEFAULT_DMA_QUEUES) {
2619                if (dma_alloc_decriptors_all_queues(chan) != 0) {
2620                        dev_err(chan->dev,
2621                                "Unable to allocate descriptors for channel %d\n",
2622                                chan->channel_number);
2623                        goto err_no_descriptors;
2624                }
2625        } else if (chan->num_queues == TWO_DMA_QUEUES) {
2626                if (dma_alloc_descriptors_two_queues(chan) != 0) {
2627                        dev_err(chan->dev,
2628                                "Unable to allocate two queue descriptors for channel %d\n",
2629                                chan->channel_number);
2630                        goto err_no_descriptors;
2631                }
2632        }
2633
2634        if (xlnx_ps_pcie_alloc_mempool(chan) != 0) {
2635                dev_err(chan->dev,
2636                        "Unable to allocate memory pool for channel %d\n",
2637                        chan->channel_number);
2638                goto err_no_mempools;
2639        }
2640
2641        if (xlnx_ps_pcie_alloc_pkt_contexts(chan) != 0) {
2642                dev_err(chan->dev,
2643                        "Unable to allocate packet contexts for channel %d\n",
2644                        chan->channel_number);
2645                goto err_no_pkt_ctxts;
2646        }
2647
2648        if (xlnx_ps_pcie_alloc_worker_threads(chan) != 0) {
2649                dev_err(chan->dev,
2650                        "Unable to allocate worker queues for channel %d\n",
2651                        chan->channel_number);
2652                goto err_no_worker_queues;
2653        }
2654
2655        xlnx_ps_pcie_reset_channel(chan);
2656
2657        dma_cookie_init(dchan);
2658
2659        return 0;
2660
2661err_no_worker_queues:
2662        xlnx_ps_pcie_free_pkt_ctxts(chan);
2663err_no_pkt_ctxts:
2664        xlnx_ps_pcie_destroy_mempool(chan);
2665err_no_mempools:
2666        xlnx_ps_pcie_free_descriptors(chan);
2667err_no_descriptors:
2668        return -ENOMEM;
2669}
2670
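/**
 * xilinx_intr_tx_submit - tx_submit hook for interrupt-only descriptors
 * @tx: async transaction descriptor embedded in a ps_pcie_intr_segment
 *
 * Assigns a cookie and queues the interrupt segment on the channel's
 * pending interrupts list.
 *
 * Return: assigned cookie, or -EINVAL if the channel is not available.
 */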
2671static dma_cookie_t xilinx_intr_tx_submit(struct dma_async_tx_descriptor *tx)
2672{
2673        struct ps_pcie_intr_segment *intr_seg =
2674                to_ps_pcie_dma_tx_intr_descriptor(tx);
2675        struct ps_pcie_dma_chan *chan = to_xilinx_chan(tx->chan);
2676        dma_cookie_t cookie;
2677
2678        if (chan->state != CHANNEL_AVAILABLE)
2679                return -EINVAL;
2680
2681        spin_lock(&chan->cookie_lock);
2682        cookie = dma_cookie_assign(tx);
2683        spin_unlock(&chan->cookie_lock);
2684
2685        spin_lock(&chan->pending_interrupts_lock);
2686        list_add_tail(&intr_seg->node, &chan->pending_interrupts_list);
2687        spin_unlock(&chan->pending_interrupts_lock);
2688
2689        return cookie;
2690}
2691
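/**
 * xilinx_dma_tx_submit - tx_submit hook for data transfer descriptors
 * @tx: async transaction descriptor embedded in a ps_pcie_tx_segment
 *
 * Assigns a cookie and queues the segment on the channel's pending list;
 * the transfer is only started later by issue_pending.
 *
 * Return: assigned cookie, or -EINVAL if the channel is not available.
 */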
2692static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
2693{
2694        struct ps_pcie_tx_segment *seg = to_ps_pcie_dma_tx_descriptor(tx);
2695        struct ps_pcie_dma_chan *chan = to_xilinx_chan(tx->chan);
2696        dma_cookie_t cookie;
2697
2698        if (chan->state != CHANNEL_AVAILABLE)
2699                return -EINVAL;
2700
2701        spin_lock(&chan->cookie_lock);
2702        cookie = dma_cookie_assign(tx);
2703        spin_unlock(&chan->cookie_lock);
2704
2705        spin_lock(&chan->pending_list_lock);
2706        list_add_tail(&seg->node, &chan->pending_list);
2707        spin_unlock(&chan->pending_list_lock);
2708
2709        return cookie;
2710}
2711
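/**
 * xlnx_ps_pcie_dma_prep_dma_sg - prepare a scatterlist to scatterlist transfer
 * @channel: DMA channel handle
 * @dst_sg: destination scatterlist
 * @dst_nents: number of destination scatterlist entries
 * @src_sg: source scatterlist
 * @src_nents: number of source scatterlist entries
 * @flags: transfer ack flags
 *
 * Only valid for channels configured with all four hardware queues
 * (DEFAULT_DMA_QUEUES).
 *
 * Return: prepared descriptor, or NULL on error.
 */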
2712static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_dma_sg(
2713                struct dma_chan *channel, struct scatterlist *dst_sg,
2714                unsigned int dst_nents, struct scatterlist *src_sg,
2715                unsigned int src_nents, unsigned long flags)
2716{
2717        struct ps_pcie_dma_chan *chan = to_xilinx_chan(channel);
2718        struct ps_pcie_tx_segment *seg = NULL;
2719
2720        if (chan->state != CHANNEL_AVAILABLE)
2721                return NULL;
2722
2723        if (dst_nents == 0 || src_nents == 0)
2724                return NULL;
2725
2726        if (!dst_sg || !src_sg)
2727                return NULL;
2728
2729        if (chan->num_queues != DEFAULT_DMA_QUEUES) {
2730                dev_err(chan->dev, "Only prep_slave_sg is supported for channel %d\n",
2731                        chan->channel_number);
2732                return NULL;
2733        }
2734
2735        seg = mempool_alloc(chan->transactions_pool, GFP_ATOMIC);
2736        if (!seg) {
2737                dev_err(chan->dev, "Unable to allocate tx segment for channel %d\n",
2738                        chan->channel_number);
2739                return NULL;
2740        }
2741
2742        memset(seg, 0, sizeof(*seg));
2743
2744        seg->tx_elements.dst_sgl = dst_sg;
2745        seg->tx_elements.dstq_num_elemets = dst_nents;
2746        seg->tx_elements.src_sgl = src_sg;
2747        seg->tx_elements.srcq_num_elemets = src_nents;
2748
2749        dma_async_tx_descriptor_init(&seg->async_tx, &chan->common);
2750        seg->async_tx.flags = flags;
2751        async_tx_ack(&seg->async_tx);
2752        seg->async_tx.tx_submit = xilinx_dma_tx_submit;
2753
2754        return &seg->async_tx;
2755}
2756
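/**
 * xlnx_ps_pcie_dma_prep_slave_sg - prepare a slave scatter-gather descriptor
 * @channel: DMA channel handle
 * @sgl: scatterlist describing the memory side of the transfer
 * @sg_len: number of scatterlist entries
 * @direction: DMA_MEM_TO_DEV or DMA_DEV_TO_MEM
 * @flags: transfer ack flags
 * @context: unused
 *
 * Only valid for channels configured with two hardware queues
 * (TWO_DMA_QUEUES); the other side of the transfer is fixed by the
 * channel direction.
 *
 * Return: prepared descriptor, or NULL on error.
 */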
2757static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_slave_sg(
2758                struct dma_chan *channel, struct scatterlist *sgl,
2759                unsigned int sg_len, enum dma_transfer_direction direction,
2760                unsigned long flags, void *context)
2761{
2762        struct ps_pcie_dma_chan *chan = to_xilinx_chan(channel);
2763        struct ps_pcie_tx_segment *seg = NULL;
2764
2765        if (chan->state != CHANNEL_AVAILABLE)
2766                return NULL;
2767
2768        if (!(is_slave_direction(direction)))
2769                return NULL;
2770
2771        if (!sgl || sg_len == 0)
2772                return NULL;
2773
2774        if (chan->num_queues != TWO_DMA_QUEUES) {
2775                dev_err(chan->dev, "Only prep_dma_sg is supported for channel %d\n",
2776                        chan->channel_number);
2777                return NULL;
2778        }
2779
2780        seg = mempool_alloc(chan->transactions_pool, GFP_ATOMIC);
2781        if (!seg) {
2782                dev_err(chan->dev, "Unable to allocate tx segment for channel %d\n",
2783                        chan->channel_number);
2784                return NULL;
2785        }
2786
2787        memset(seg, 0, sizeof(*seg));
2788
2789        if (chan->direction == DMA_TO_DEVICE) {
2790                seg->tx_elements.src_sgl = sgl;
2791                seg->tx_elements.srcq_num_elemets = sg_len;
2792                seg->tx_elements.dst_sgl = NULL;
2793                seg->tx_elements.dstq_num_elemets = 0;
2794        } else {
2795                seg->tx_elements.src_sgl = NULL;
2796                seg->tx_elements.srcq_num_elemets = 0;
2797                seg->tx_elements.dst_sgl = sgl;
2798                seg->tx_elements.dstq_num_elemets = sg_len;
2799        }
2800
2801        dma_async_tx_descriptor_init(&seg->async_tx, &chan->common);
2802        seg->async_tx.flags = flags;
2803        async_tx_ack(&seg->async_tx);
2804        seg->async_tx.tx_submit = xilinx_dma_tx_submit;
2805
2806        return &seg->async_tx;
2807}
2808
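/**
 * xlnx_ps_pcie_dma_issue_pending - dmaengine issue_pending callback
 * @channel: DMA channel handle
 *
 * Moves pending data and interrupt descriptors onto the active lists and
 * kicks the channel programming workqueue to push them to hardware.
 */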
2809static void xlnx_ps_pcie_dma_issue_pending(struct dma_chan *channel)
2810{
2811        struct ps_pcie_dma_chan *chan;
2812
2813        if (!channel)
2814                return;
2815
2816        chan = to_xilinx_chan(channel);
2817
2818        if (!list_empty(&chan->pending_list)) {
2819                spin_lock(&chan->pending_list_lock);
2820                spin_lock(&chan->active_list_lock);
2821                list_splice_tail_init(&chan->pending_list,
2822                                      &chan->active_list);
2823                spin_unlock(&chan->active_list_lock);
2824                spin_unlock(&chan->pending_list_lock);
2825        }
2826
2827        if (!list_empty(&chan->pending_interrupts_list)) {
2828                spin_lock(&chan->pending_interrupts_lock);
2829                spin_lock(&chan->active_interrupts_lock);
2830                list_splice_tail_init(&chan->pending_interrupts_list,
2831                                      &chan->active_interrupts_list);
2832                spin_unlock(&chan->active_interrupts_lock);
2833                spin_unlock(&chan->pending_interrupts_lock);
2834        }
2835
2836        if (chan->chan_programming)
2837                queue_work(chan->chan_programming,
2838                           &chan->handle_chan_programming);
2839}
2840
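/**
 * xlnx_ps_pcie_dma_terminate_all - dmaengine terminate_all callback
 * @channel: DMA channel handle
 *
 * Queues channel terminate work on the maintenance workqueue and waits for
 * it to complete.
 *
 * Return: 0 on success, 1 if the channel is not available, -EINVAL on a
 * NULL channel.
 */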
2841static int xlnx_ps_pcie_dma_terminate_all(struct dma_chan *channel)
2842{
2843        struct ps_pcie_dma_chan *chan;
2844
2845        if (!channel)
2846                return -EINVAL;
2847
2848        chan = to_xilinx_chan(channel);
2849
2850        if (chan->state != CHANNEL_AVAILABLE)
2851                return 1;
2852
2853        if (chan->maintenance_workq) {
2854                if (completion_done(&chan->chan_terminate_complete))
2855                        reinit_completion(&chan->chan_terminate_complete);
2856                queue_work(chan->maintenance_workq,
2857                           &chan->handle_chan_terminate);
2858                wait_for_completion_interruptible(
2859                           &chan->chan_terminate_complete);
2860        }
2861
2862        return 0;
2863}
2864
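/**
 * xlnx_ps_pcie_dma_prep_interrupt - prepare an interrupt-only descriptor
 * @channel: DMA channel handle
 * @flags: transfer ack flags
 *
 * Allocates an interrupt segment from the channel's interrupt transaction
 * pool and initializes its async descriptor.
 *
 * Return: prepared descriptor, or NULL if the channel is unavailable or the
 * allocation fails.
 */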
2865static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_interrupt(
2866                struct dma_chan *channel, unsigned long flags)
2867{
2868        struct ps_pcie_dma_chan *chan;
2869        struct ps_pcie_intr_segment *intr_segment = NULL;
2870
2871        if (!channel)
2872                return NULL;
2873
2874        chan = to_xilinx_chan(channel);
2875
2876        if (chan->state != CHANNEL_AVAILABLE)
2877                return NULL;
2878
2879        intr_segment = mempool_alloc(chan->intr_transactions_pool, GFP_ATOMIC);
        if (!intr_segment) {
                dev_err(chan->dev,
                        "Unable to allocate interrupt segment for channel %d\n",
                        chan->channel_number);
                return NULL;
        }
2880
2881        memset(intr_segment, 0, sizeof(*intr_segment));
2882
2883        dma_async_tx_descriptor_init(&intr_segment->async_intr_tx,
2884                                     &chan->common);
2885        intr_segment->async_intr_tx.flags = flags;
2886        async_tx_ack(&intr_segment->async_intr_tx);
2887        intr_segment->async_intr_tx.tx_submit = xilinx_intr_tx_submit;
2888
2889        return &intr_segment->async_intr_tx;
2890}
2891
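/**
 * xlnx_pcie_dma_driver_probe - platform driver probe callback
 * @platform_dev: platform device backing this PS PCIe DMA instance
 *
 * Reads the device configuration (root DMA or end point DMA), sets up the
 * dmaengine capabilities and callbacks, probes per-channel properties,
 * requests interrupt lines and registers the device with the DMA framework.
 *
 * Return: 0 on success, negative error code otherwise.
 */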
2892static int xlnx_pcie_dma_driver_probe(struct platform_device *platform_dev)
2893{
2894        int err, i;
2895        struct xlnx_pcie_dma_device *xdev;
2896        static u16 board_number;
2897
2898        xdev = devm_kzalloc(&platform_dev->dev,
2899                            sizeof(struct xlnx_pcie_dma_device), GFP_KERNEL);
2900
2901        if (!xdev)
2902                return -ENOMEM;
2903
2904#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2905        xdev->dma_buf_ext_addr = true;
2906#else
2907        xdev->dma_buf_ext_addr = false;
2908#endif
2909
2910        xdev->is_rootdma = device_property_read_bool(&platform_dev->dev,
2911                                                     "rootdma");
2912
2913        xdev->dev = &platform_dev->dev;
2914        xdev->board_number = board_number;
2915
2916        err = device_property_read_u32(&platform_dev->dev, "numchannels",
2917                                       &xdev->num_channels);
2918        if (err) {
2919                dev_err(&platform_dev->dev,
2920                        "Unable to find numchannels property\n");
2921                goto platform_driver_probe_return;
2922        }
2923
2924        if (xdev->num_channels == 0 || xdev->num_channels >
2925                MAX_ALLOWED_CHANNELS_IN_HW) {
2926                dev_warn(&platform_dev->dev,
2927                         "Invalid numchannels property value\n");
2928                xdev->num_channels = MAX_ALLOWED_CHANNELS_IN_HW;
2929        }
2930
2931        xdev->channels = devm_kcalloc(&platform_dev->dev,
2932                                      xdev->num_channels,
2933                                      sizeof(struct ps_pcie_dma_chan),
2934                                      GFP_KERNEL);
2936        if (!xdev->channels) {
2937                err = -ENOMEM;
2938                goto platform_driver_probe_return;
2939        }
2940
2941        if (xdev->is_rootdma)
2942                err = read_rootdma_config(platform_dev, xdev);
2943        else
2944                err = read_epdma_config(platform_dev, xdev);
2945
2946        if (err) {
2947                dev_err(&platform_dev->dev,
2948                        "Unable to initialize dma configuration\n");
2949                goto platform_driver_probe_return;
2950        }
2951
2952        /* Initialize the DMA engine */
2953        INIT_LIST_HEAD(&xdev->common.channels);
2954
2955        dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
2956        dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
2957        dma_cap_set(DMA_SG, xdev->common.cap_mask);
2958        dma_cap_set(DMA_INTERRUPT, xdev->common.cap_mask);
2959
2960        xdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_UNDEFINED;
2961        xdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_UNDEFINED;
2962        xdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2963        xdev->common.device_alloc_chan_resources =
2964                xlnx_ps_pcie_dma_alloc_chan_resources;
2965        xdev->common.device_free_chan_resources =
2966                xlnx_ps_pcie_dma_free_chan_resources;
2967        xdev->common.device_terminate_all = xlnx_ps_pcie_dma_terminate_all;
2968        xdev->common.device_tx_status = dma_cookie_status;
2969        xdev->common.device_issue_pending = xlnx_ps_pcie_dma_issue_pending;
2970        xdev->common.device_prep_dma_interrupt =
2971                xlnx_ps_pcie_dma_prep_interrupt;
2972        xdev->common.device_prep_dma_sg = xlnx_ps_pcie_dma_prep_dma_sg;
2973        xdev->common.device_prep_slave_sg = xlnx_ps_pcie_dma_prep_slave_sg;
2974        xdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2975
2976        for (i = 0; i < xdev->num_channels; i++) {
2977                err = probe_channel_properties(platform_dev, xdev, i);
2978
2979                if (err != 0) {
2980                        dev_err(xdev->dev,
2981                                "Unable to read channel properties\n");
2982                        goto platform_driver_probe_return;
2983                }
2984        }
2985
2986        if (xdev->is_rootdma)
2987                err = platform_irq_setup(xdev);
2988        else
2989                err = irq_setup(xdev);
2990        if (err) {
2991                dev_err(xdev->dev, "Cannot request irq lines for device %d\n",
2992                        xdev->board_number);
2993                goto platform_driver_probe_return;
2994        }
2995
2996        err = dma_async_device_register(&xdev->common);
2997        if (err) {
2998                dev_err(xdev->dev,
2999                        "Unable to register board %d with dma framework\n",
3000                        xdev->board_number);
3001                goto platform_driver_probe_return;
3002        }
3003
3004        platform_set_drvdata(platform_dev, xdev);
3005
3006        board_number++;
3007
3008        dev_info(&platform_dev->dev, "PS PCIe Platform driver probed\n");
3009        return 0;
3010
3011platform_driver_probe_return:
3012        return err;
3013}
3014
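/**
 * xlnx_pcie_dma_driver_remove - platform driver remove callback
 * @platform_dev: platform device being removed
 *
 * Frees the resources of every channel and unregisters the device from the
 * DMA framework.
 *
 * Return: always 0.
 */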
3015static int xlnx_pcie_dma_driver_remove(struct platform_device *platform_dev)
3016{
3017        struct xlnx_pcie_dma_device *xdev =
3018                platform_get_drvdata(platform_dev);
3019        int i;
3020
3021        for (i = 0; i < xdev->num_channels; i++)
3022                xlnx_ps_pcie_dma_free_chan_resources(&xdev->channels[i].common);
3023
3024        dma_async_device_unregister(&xdev->common);
3025
3026        return 0;
3027}
3028
3029#ifdef CONFIG_OF
3030static const struct of_device_id xlnx_pcie_root_dma_of_ids[] = {
3031        { .compatible = "xlnx,ps_pcie_dma-1.00.a", },
3032        {}
3033};
3034MODULE_DEVICE_TABLE(of, xlnx_pcie_root_dma_of_ids);
3035#endif
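/*
 * Illustrative device tree node (not copied from a shipped dtsi); only the
 * properties consumed directly in this file are shown, with placeholder
 * values, and register/interrupt resources are omitted:
 *
 *	ps_pcie_dma {
 *		compatible = "xlnx,ps_pcie_dma-1.00.a";
 *		rootdma;
 *		numchannels = <4>;
 *		...
 *	};
 */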
3036
3037static struct platform_driver xlnx_pcie_dma_driver = {
3038        .driver = {
3039                .name = XLNX_PLATFORM_DRIVER_NAME,
3040                .of_match_table = of_match_ptr(xlnx_pcie_root_dma_of_ids),
3041                .owner = THIS_MODULE,
3042        },
3043        .probe =  xlnx_pcie_dma_driver_probe,
3044        .remove = xlnx_pcie_dma_driver_remove,
3045};
3046
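/*
 * Module-level registration helpers for this platform driver; they are
 * expected to be called from the init/exit paths of the PS PCIe DMA module
 * (the prototypes presumably live in xilinx_ps_pcie.h).
 */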
3047int dma_platform_driver_register(void)
3048{
3049        return platform_driver_register(&xlnx_pcie_dma_driver);
3050}
3051
3052void dma_platform_driver_unregister(void)
3053{
3054        platform_driver_unregister(&xlnx_pcie_dma_driver);
3055}
3056