linux/drivers/hid/intel-ish-hid/ishtp/client.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ISHTP client logic
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 */

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include "hbm.h"
#include "client.h"

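/**
 * ishtp_cl_get_tx_free_buffer_size() - Get free bytes in the TX ring
 * @cl: ishtp client instance
 *
 * Return: total number of free bytes across the client's free TX ring
 * buffers
 */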
int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl)
{
        unsigned long tx_free_flags;
        int size;

        spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
        size = cl->tx_ring_free_size * cl->device->fw_client->props.max_msg_length;
        spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);

        return size;
}
EXPORT_SYMBOL(ishtp_cl_get_tx_free_buffer_size);

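/**
 * ishtp_cl_get_tx_free_rings() - Get the number of free TX ring buffers
 * @cl: ishtp client instance
 *
 * Return: number of free buffers in the client's TX ring
 */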
int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl)
{
        return cl->tx_ring_free_size;
}
EXPORT_SYMBOL(ishtp_cl_get_tx_free_rings);
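
/*
 * Illustrative sketch (not part of the driver): a bus client could use the
 * two helpers above to avoid -ENOMEM from ishtp_cl_send() by checking free
 * TX capacity before queueing. The helper name and its use are hypothetical.
 */
static bool __maybe_unused example_can_queue(struct ishtp_cl *cl, size_t len)
{
        /* Need at least one free ring entry and enough aggregate space */
        return ishtp_cl_get_tx_free_rings(cl) > 0 &&
               (size_t)ishtp_cl_get_tx_free_buffer_size(cl) >= len;
}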

/**
 * ishtp_read_list_flush() - Flush read queue
 * @cl: ishtp client instance
 *
 * Used to remove all entries from read queue for a client
 */
static void ishtp_read_list_flush(struct ishtp_cl *cl)
{
        struct ishtp_cl_rb *rb;
        struct ishtp_cl_rb *next;
        unsigned long   flags;

        spin_lock_irqsave(&cl->dev->read_list_spinlock, flags);
        list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
                if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
                        list_del(&rb->list);
                        ishtp_io_rb_free(rb);
                }
        spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
}

/**
 * ishtp_cl_flush_queues() - Flush all queues for a client
 * @cl: ishtp client instance
 *
 * Used to remove all queues for a client. This is called when a client device
 * needs reset due to error, S3 resume or during module removal
 *
 * Return: 0 on success else -EINVAL if device is NULL
 */
int ishtp_cl_flush_queues(struct ishtp_cl *cl)
{
        if (WARN_ON(!cl || !cl->dev))
                return -EINVAL;

        ishtp_read_list_flush(cl);

        return 0;
}
EXPORT_SYMBOL(ishtp_cl_flush_queues);

/**
 * ishtp_cl_init() - Initialize all fields of a client device
 * @cl: ishtp client instance
 * @dev: ishtp device
 *
 * Initializes the client device fields: spinlocks, queues, etc.
 * This function is called during new client creation.
 */
static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
{
        memset(cl, 0, sizeof(struct ishtp_cl));
        init_waitqueue_head(&cl->wait_ctrl_res);
        spin_lock_init(&cl->free_list_spinlock);
        spin_lock_init(&cl->in_process_spinlock);
        spin_lock_init(&cl->tx_list_spinlock);
        spin_lock_init(&cl->tx_free_list_spinlock);
        spin_lock_init(&cl->fc_spinlock);
        INIT_LIST_HEAD(&cl->link);
        cl->dev = dev;

        INIT_LIST_HEAD(&cl->free_rb_list.list);
        INIT_LIST_HEAD(&cl->tx_list.list);
        INIT_LIST_HEAD(&cl->tx_free_list.list);
        INIT_LIST_HEAD(&cl->in_process_list.list);

        cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
        cl->tx_ring_size = CL_DEF_TX_RING_SIZE;
        cl->tx_ring_free_size = cl->tx_ring_size;

        /* dma */
        cl->last_tx_path = CL_TX_PATH_IPC;
        cl->last_dma_acked = 1;
        cl->last_dma_addr = NULL;
        cl->last_ipc_acked = 1;
}

/**
 * ishtp_cl_allocate() - allocates client structure and sets it up.
 * @cl_device: ishtp client device
 *
 * Allocate memory for a new client device and initialize its fields.
 *
 * Return: The allocated client instance or NULL on failure
 */
struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device)
{
        struct ishtp_cl *cl;

        cl = kmalloc(sizeof(struct ishtp_cl), GFP_KERNEL);
        if (!cl)
                return NULL;

        ishtp_cl_init(cl, cl_device->ishtp_dev);
        return cl;
}
EXPORT_SYMBOL(ishtp_cl_allocate);

/**
 * ishtp_cl_free() - Frees a client device
 * @cl: client device instance
 *
 * Frees a client device
 */
void ishtp_cl_free(struct ishtp_cl *cl)
{
        struct ishtp_device *dev;
        unsigned long flags;

        if (!cl)
                return;

        dev = cl->dev;
        if (!dev)
                return;

        spin_lock_irqsave(&dev->cl_list_lock, flags);
        ishtp_cl_free_rx_ring(cl);
        ishtp_cl_free_tx_ring(cl);
        kfree(cl);
        spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
EXPORT_SYMBOL(ishtp_cl_free);

/**
 * ishtp_cl_link() - Reserve a host id and link the client instance
 * @cl: client device instance
 *
 * This allocates a single bit in the hostmap, which bounds how many client
 * sessions can be open at the same time. Once allocated, the client instance
 * is added to the ishtp device's client list.
 *
 * Return: 0 on success or an error code on failure
 */
int ishtp_cl_link(struct ishtp_cl *cl)
{
        struct ishtp_device *dev;
        unsigned long flags, flags_cl;
        int id, ret = 0;

        if (WARN_ON(!cl || !cl->dev))
                return -EINVAL;

        dev = cl->dev;

        spin_lock_irqsave(&dev->device_lock, flags);

        if (dev->open_handle_count >= ISHTP_MAX_OPEN_HANDLE_COUNT) {
                ret = -EMFILE;
                goto unlock_dev;
        }

        id = find_first_zero_bit(dev->host_clients_map, ISHTP_CLIENTS_MAX);

        if (id >= ISHTP_CLIENTS_MAX) {
                spin_unlock_irqrestore(&dev->device_lock, flags);
                dev_err(&cl->device->dev, "id exceeded %d", ISHTP_CLIENTS_MAX);
                return -ENOENT;
        }

        dev->open_handle_count++;
        cl->host_client_id = id;
        spin_lock_irqsave(&dev->cl_list_lock, flags_cl);
        if (dev->dev_state != ISHTP_DEV_ENABLED) {
                ret = -ENODEV;
                goto unlock_cl;
        }
        list_add_tail(&cl->link, &dev->cl_list);
        set_bit(id, dev->host_clients_map);
        cl->state = ISHTP_CL_INITIALIZING;

unlock_cl:
        spin_unlock_irqrestore(&dev->cl_list_lock, flags_cl);
unlock_dev:
        spin_unlock_irqrestore(&dev->device_lock, flags);
        return ret;
}
EXPORT_SYMBOL(ishtp_cl_link);
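
/*
 * Illustrative sketch (not part of the driver): typical client creation on
 * the ISHTP bus pairs ishtp_cl_allocate() with ishtp_cl_link() and unwinds
 * the allocation on failure. The helper name and ERR_PTR-style return are
 * hypothetical.
 */
static __maybe_unused struct ishtp_cl *
example_cl_create(struct ishtp_cl_device *cl_device)
{
        struct ishtp_cl *cl;
        int rv;

        cl = ishtp_cl_allocate(cl_device);
        if (!cl)
                return ERR_PTR(-ENOMEM);

        /* Reserve a host id; fails when too many sessions are open */
        rv = ishtp_cl_link(cl);
        if (rv) {
                ishtp_cl_free(cl);
                return ERR_PTR(rv);
        }
        return cl;
}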

/**
 * ishtp_cl_unlink() - remove a client from the ishtp device list
 * @cl: client device instance
 *
 * Remove a previously linked client from the ishtp device's client list
 * and release its host id.
 */
void ishtp_cl_unlink(struct ishtp_cl *cl)
{
        struct ishtp_device *dev;
        struct ishtp_cl *pos;
        unsigned long   flags;

        /* don't shout on error exit path */
        if (!cl || !cl->dev)
                return;

        dev = cl->dev;

        spin_lock_irqsave(&dev->device_lock, flags);
        if (dev->open_handle_count > 0) {
                clear_bit(cl->host_client_id, dev->host_clients_map);
                dev->open_handle_count--;
        }
        spin_unlock_irqrestore(&dev->device_lock, flags);

        /*
         * This checks that 'cl' is actually linked into device's structure,
         * before attempting 'list_del'
         */
        spin_lock_irqsave(&dev->cl_list_lock, flags);
        list_for_each_entry(pos, &dev->cl_list, link)
                if (cl->host_client_id == pos->host_client_id) {
                        list_del_init(&pos->link);
                        break;
                }
        spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
EXPORT_SYMBOL(ishtp_cl_unlink);

/**
 * ishtp_cl_disconnect() - Send disconnect request to firmware
 * @cl: client device instance
 *
 * Send a disconnect request for a client to firmware.
 *
 * Return: 0 if successful disconnect response from the firmware or error
 * code on failure
 */
int ishtp_cl_disconnect(struct ishtp_cl *cl)
{
        struct ishtp_device *dev;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        dev->print_log(dev, "%s() state %d\n", __func__, cl->state);

        if (cl->state != ISHTP_CL_DISCONNECTING) {
                dev->print_log(dev, "%s() Disconnect in progress\n", __func__);
                return 0;
        }

        if (ishtp_hbm_cl_disconnect_req(dev, cl)) {
                dev->print_log(dev, "%s() Failed to disconnect\n", __func__);
                dev_err(&cl->device->dev, "failed to disconnect.\n");
                return -ENODEV;
        }

        wait_event_interruptible_timeout(cl->wait_ctrl_res,
                        (dev->dev_state != ISHTP_DEV_ENABLED ||
                        cl->state == ISHTP_CL_DISCONNECTED),
                        ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT));

        /*
         * If a FW reset arrived, this will happen. Don't dereference 'cl',
         * as it may have been freed already
         */
        if (dev->dev_state != ISHTP_DEV_ENABLED) {
                dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
                               __func__);
                return -ENODEV;
        }

        if (cl->state == ISHTP_CL_DISCONNECTED) {
                dev->print_log(dev, "%s() successful\n", __func__);
                return 0;
        }

        return -ENODEV;
}
EXPORT_SYMBOL(ishtp_cl_disconnect);
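
/*
 * Illustrative sketch (not part of the driver): ishtp_cl_disconnect() only
 * issues the request when the client is already in ISHTP_CL_DISCONNECTING,
 * so a teardown path moves the state first and then unwinds, mirroring what
 * in-tree bus clients do on remove/reset. The helper name is hypothetical.
 */
static void __maybe_unused example_cl_teardown(struct ishtp_cl *cl)
{
        ishtp_set_connection_state(cl, ISHTP_CL_DISCONNECTING);
        ishtp_cl_disconnect(cl);
        ishtp_cl_unlink(cl);
        ishtp_cl_flush_queues(cl);
        ishtp_cl_free(cl);
}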

/**
 * ishtp_cl_is_other_connecting() - Check if another client is connecting
 * @cl: client device instance
 *
 * Checks if another client with the same fw client id is connecting
 *
 * Return: true if another client is connecting, else false
 */
static bool ishtp_cl_is_other_connecting(struct ishtp_cl *cl)
{
        struct ishtp_device *dev;
        struct ishtp_cl *pos;
        unsigned long   flags;

        if (WARN_ON(!cl || !cl->dev))
                return false;

        dev = cl->dev;
        spin_lock_irqsave(&dev->cl_list_lock, flags);
        list_for_each_entry(pos, &dev->cl_list, link) {
                if ((pos->state == ISHTP_CL_CONNECTING) && (pos != cl) &&
                                cl->fw_client_id == pos->fw_client_id) {
                        spin_unlock_irqrestore(&dev->cl_list_lock, flags);
                        return true;
                }
        }
        spin_unlock_irqrestore(&dev->cl_list_lock, flags);

        return false;
}

/**
 * ishtp_cl_connect() - Send connect request to firmware
 * @cl: client device instance
 *
 * Send a connect request for a client to firmware. If successful, it will
 * allocate the RX and TX ring buffers.
 *
 * Return: 0 if successful connect response from the firmware and able
 * to bind and allocate ring buffers or error code on failure
 */
int ishtp_cl_connect(struct ishtp_cl *cl)
{
        struct ishtp_device *dev;
        int rets;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state);

        if (ishtp_cl_is_other_connecting(cl)) {
                dev->print_log(dev, "%s() Busy\n", __func__);
                return  -EBUSY;
        }

        if (ishtp_hbm_cl_connect_req(dev, cl)) {
                dev->print_log(dev, "%s() HBM connect req fail\n", __func__);
                return -ENODEV;
        }

        rets = wait_event_interruptible_timeout(cl->wait_ctrl_res,
                                (dev->dev_state == ISHTP_DEV_ENABLED &&
                                (cl->state == ISHTP_CL_CONNECTED ||
                                 cl->state == ISHTP_CL_DISCONNECTED)),
                                ishtp_secs_to_jiffies(
                                        ISHTP_CL_CONNECT_TIMEOUT));
        /*
         * If a FW reset arrived, this will happen. Don't dereference 'cl',
         * as it may have been freed already
         */
        if (dev->dev_state != ISHTP_DEV_ENABLED) {
                dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
                               __func__);
                return -EFAULT;
        }

        if (cl->state != ISHTP_CL_CONNECTED) {
                dev->print_log(dev, "%s() state != ISHTP_CL_CONNECTED\n",
                               __func__);
                return -EFAULT;
        }

        rets = cl->status;
        if (rets) {
                dev->print_log(dev, "%s() Invalid status\n", __func__);
                return rets;
        }

        rets = ishtp_cl_device_bind(cl);
        if (rets) {
                dev->print_log(dev, "%s() Bind error\n", __func__);
                ishtp_cl_disconnect(cl);
                return rets;
        }

        rets = ishtp_cl_alloc_rx_ring(cl);
        if (rets) {
                dev->print_log(dev, "%s() Alloc RX ring failed\n", __func__);
                /* if failed allocation, disconnect */
                ishtp_cl_disconnect(cl);
                return rets;
        }

        rets = ishtp_cl_alloc_tx_ring(cl);
        if (rets) {
                dev->print_log(dev, "%s() Alloc TX ring failed\n", __func__);
                /* if failed allocation, disconnect */
                ishtp_cl_free_rx_ring(cl);
                ishtp_cl_disconnect(cl);
                return rets;
        }

        /* Upon successful connection and allocation, emit flow-control */
        rets = ishtp_cl_read_start(cl);

        dev->print_log(dev, "%s() successful\n", __func__);

        return rets;
}
EXPORT_SYMBOL(ishtp_cl_connect);
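
/*
 * Illustrative sketch (not part of the driver): the usual connect sequence
 * for a bus client. Resolving 'fw_client_id' from the enumerated firmware
 * client list is elided; the helper name is hypothetical.
 */
static int __maybe_unused example_cl_connect(struct ishtp_cl *cl,
                                             int fw_client_id)
{
        ishtp_cl_set_fw_client_id(cl, fw_client_id);
        ishtp_set_connection_state(cl, ISHTP_CL_CONNECTING);

        /* On success this also allocates the rings and emits the first FC */
        return ishtp_cl_connect(cl);
}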

/**
 * ishtp_cl_read_start() - Prepare to read client message
 * @cl: client device instance
 *
 * Get a free buffer from the pool of free read buffers and add it to the
 * device read list, then send a flow control request to firmware so it is
 * able to send the next message.
 *
 * Return: 0 if successful or error code on failure
 */
int ishtp_cl_read_start(struct ishtp_cl *cl)
{
        struct ishtp_device *dev;
        struct ishtp_cl_rb *rb;
        int rets;
        int i;
        unsigned long   flags;
        unsigned long   dev_flags;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        if (cl->state != ISHTP_CL_CONNECTED)
                return -ENODEV;

        if (dev->dev_state != ISHTP_DEV_ENABLED)
                return -ENODEV;

        i = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
        if (i < 0) {
                dev_err(&cl->device->dev, "no such fw client %d\n",
                        cl->fw_client_id);
                return -ENODEV;
        }

        /* The current rb is the head of the free rb list */
        spin_lock_irqsave(&cl->free_list_spinlock, flags);
        if (list_empty(&cl->free_rb_list.list)) {
                dev_warn(&cl->device->dev,
                         "[ishtp-ish] Rx buffers pool is empty\n");
                rets = -ENOMEM;
                rb = NULL;
                spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
                goto out;
        }
        rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);
        list_del_init(&rb->list);
        spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

        rb->cl = cl;
        rb->buf_idx = 0;

        INIT_LIST_HEAD(&rb->list);
        rets = 0;

        /*
         * This must be BEFORE sending flow control -
         * response in ISR may come too fast...
         */
        spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
        list_add_tail(&rb->list, &dev->read_list.list);
        spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
        if (ishtp_hbm_cl_flow_control_req(dev, cl)) {
                rets = -ENODEV;
                goto out;
        }
out:
        /* if ishtp_hbm_cl_flow_control_req failed, return rb to free list */
        if (rets && rb) {
                spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
                list_del(&rb->list);
                spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);

                spin_lock_irqsave(&cl->free_list_spinlock, flags);
                list_add_tail(&rb->list, &cl->free_rb_list.list);
                spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
        }
        return rets;
}

/**
 * ishtp_cl_send() - Send a message to firmware
 * @cl: client device instance
 * @buf: message buffer
 * @length: length of message
 *
 * If the client is in the correct state to send a message, this function
 * gets a buffer from the tx ring, copies the message data into it and
 * queues it for sending via ishtp_cl_send_msg()
 *
 * Return: 0 if successful or error code on failure
 */
int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
{
        struct ishtp_device     *dev;
        int     id;
        struct ishtp_cl_tx_ring *cl_msg;
        int     have_msg_to_send = 0;
        unsigned long   tx_flags, tx_free_flags;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        if (cl->state != ISHTP_CL_CONNECTED) {
                ++cl->err_send_msg;
                return -EPIPE;
        }

        if (dev->dev_state != ISHTP_DEV_ENABLED) {
                ++cl->err_send_msg;
                return -ENODEV;
        }

        /* Check if we have fw client device */
        id = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
        if (id < 0) {
                ++cl->err_send_msg;
                return -ENOENT;
        }

        if (length > dev->fw_clients[id].props.max_msg_length) {
                ++cl->err_send_msg;
                return -EMSGSIZE;
        }

        /* No free bufs */
        spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
        if (list_empty(&cl->tx_free_list.list)) {
                spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
                        tx_free_flags);
                ++cl->err_send_msg;
                return  -ENOMEM;
        }

        cl_msg = list_first_entry(&cl->tx_free_list.list,
                struct ishtp_cl_tx_ring, list);
        if (!cl_msg->send_buf.data) {
                spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
                        tx_free_flags);
                return  -EIO;
                /* Should not happen, as free list is pre-allocated */
        }
        /*
         * This is safe, as 'length' is already checked for not exceeding
         * max ISHTP message size per client
         */
        list_del_init(&cl_msg->list);
        --cl->tx_ring_free_size;

        spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
        memcpy(cl_msg->send_buf.data, buf, length);
        cl_msg->send_buf.size = length;
        spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
        have_msg_to_send = !list_empty(&cl->tx_list.list);
        list_add_tail(&cl_msg->list, &cl->tx_list.list);
        spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

        if (!have_msg_to_send && cl->ishtp_flow_ctrl_creds > 0)
                ishtp_cl_send_msg(dev, cl);

        return  0;
}
EXPORT_SYMBOL(ishtp_cl_send);
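
/*
 * Illustrative sketch (not part of the driver): queueing a message for
 * transmission. ishtp_cl_send() copies the payload into a TX ring buffer,
 * so the caller's buffer may be reused as soon as it returns. The payload
 * and helper name are hypothetical.
 */
static int __maybe_unused example_cl_send_hello(struct ishtp_cl *cl)
{
        uint8_t hello[] = { 0x01, 0x02, 0x03 };

        /* 0 on successful queueing; -ENOMEM when no TX ring buffer is free */
        return ishtp_cl_send(cl, hello, sizeof(hello));
}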

/**
 * ishtp_cl_read_complete() - read complete
 * @rb: Pointer to client request block
 *
 * If the message is completely received, call ishtp_cl_bus_rx_event()
 * to process it
 */
static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
{
        unsigned long   flags;
        int     schedule_work_flag = 0;
        struct ishtp_cl *cl = rb->cl;

        spin_lock_irqsave(&cl->in_process_spinlock, flags);
        /*
         * if in-process list is empty, then need to schedule
         * the processing thread
         */
        schedule_work_flag = list_empty(&cl->in_process_list.list);
        list_add_tail(&rb->list, &cl->in_process_list.list);
        spin_unlock_irqrestore(&cl->in_process_spinlock, flags);

        if (schedule_work_flag)
                ishtp_cl_bus_rx_event(cl->device);
}

/**
 * ipc_tx_callback() - IPC tx callback function
 * @prm: Pointer to client device instance
 *
 * Send a message over IPC, either for the first time or on completion
 * callback of the previous fragment
 */
static void ipc_tx_callback(void *prm)
{
        struct ishtp_cl *cl = prm;
        struct ishtp_cl_tx_ring *cl_msg;
        size_t  rem;
        struct ishtp_device     *dev = (cl ? cl->dev : NULL);
        struct ishtp_msg_hdr    ishtp_hdr;
        unsigned long   tx_flags, tx_free_flags;
        unsigned char   *pmsg;

        if (!dev)
                return;

        /*
         * Bail out if some critical error occurred before
         * this callback was called
         */
        if (dev->dev_state != ISHTP_DEV_ENABLED)
                return;

        if (cl->state != ISHTP_CL_CONNECTED)
                return;

        spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
        if (list_empty(&cl->tx_list.list)) {
                spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
                return;
        }

        if (cl->ishtp_flow_ctrl_creds != 1 && !cl->sending) {
                spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
                return;
        }

        if (!cl->sending) {
                --cl->ishtp_flow_ctrl_creds;
                cl->last_ipc_acked = 0;
                cl->last_tx_path = CL_TX_PATH_IPC;
                cl->sending = 1;
        }

        cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
                            list);
        rem = cl_msg->send_buf.size - cl->tx_offs;

        ishtp_hdr.host_addr = cl->host_client_id;
        ishtp_hdr.fw_addr = cl->fw_client_id;
        ishtp_hdr.reserved = 0;
        pmsg = cl_msg->send_buf.data + cl->tx_offs;

        if (rem <= dev->mtu) {
                ishtp_hdr.length = rem;
                ishtp_hdr.msg_complete = 1;
                cl->sending = 0;
                list_del_init(&cl_msg->list);   /* Must be before write */
                spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
                /* Submit to IPC queue with no callback */
                ishtp_write_message(dev, &ishtp_hdr, pmsg);
                spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
                list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
                ++cl->tx_ring_free_size;
                spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
                        tx_free_flags);
        } else {
                /* Send IPC fragment */
                spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
                cl->tx_offs += dev->mtu;
                ishtp_hdr.length = dev->mtu;
                ishtp_hdr.msg_complete = 0;
                ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl);
        }
}
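
/*
 * Worked example of the fragmentation above (illustrative; the real value of
 * dev->mtu is hardware-specific): for a 300-byte message and an assumed MTU
 * of 128 bytes, the callback runs three times:
 *   1st: tx_offs = 0,   rem = 300, sends 128 bytes, msg_complete = 0
 *   2nd: tx_offs = 128, rem = 172, sends 128 bytes, msg_complete = 0
 *   3rd: tx_offs = 256, rem = 44,  sends 44 bytes,  msg_complete = 1
 * Only the final fragment is written without a completion callback, and the
 * ring buffer is returned to the free list at that point.
 */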

/**
 * ishtp_cl_send_msg_ipc() - Send message using IPC
 * @dev: ISHTP device instance
 * @cl: Pointer to client device instance
 *
 * Send message over IPC not using DMA
 */
static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
                                  struct ishtp_cl *cl)
{
        /* If last DMA message wasn't acked yet, leave this one in Tx queue */
        if (cl->last_tx_path == CL_TX_PATH_DMA && cl->last_dma_acked == 0)
                return;

        cl->tx_offs = 0;
        ipc_tx_callback(cl);
        ++cl->send_msg_cnt_ipc;
}

/**
 * ishtp_cl_send_msg_dma() - Send message using DMA
 * @dev: ISHTP device instance
 * @cl: Pointer to client device instance
 *
 * Send message using DMA
 */
static void ishtp_cl_send_msg_dma(struct ishtp_device *dev,
        struct ishtp_cl *cl)
{
        struct ishtp_msg_hdr    hdr;
        struct dma_xfer_hbm     dma_xfer;
        unsigned char   *msg_addr;
        int off;
        struct ishtp_cl_tx_ring *cl_msg;
        unsigned long tx_flags, tx_free_flags;

        /* If last IPC message wasn't acked yet, leave this one in Tx queue */
        if (cl->last_tx_path == CL_TX_PATH_IPC && cl->last_ipc_acked == 0)
                return;

        spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
        if (list_empty(&cl->tx_list.list)) {
                spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
                return;
        }

        cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
                list);

        msg_addr = ishtp_cl_get_dma_send_buf(dev, cl_msg->send_buf.size);
        if (!msg_addr) {
                spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
                if (dev->transfer_path == CL_TX_PATH_DEFAULT)
                        ishtp_cl_send_msg_ipc(dev, cl);
                return;
        }

        list_del_init(&cl_msg->list);   /* Must be before write */
        spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

        --cl->ishtp_flow_ctrl_creds;
        cl->last_dma_acked = 0;
        cl->last_dma_addr = msg_addr;
        cl->last_tx_path = CL_TX_PATH_DMA;

        /* write msg to dma buf */
        memcpy(msg_addr, cl_msg->send_buf.data, cl_msg->send_buf.size);
        /*
         * If the current fw doesn't support cache snooping, the driver
         * has to flush the cache manually.
         */
        if (dev->ops->dma_no_cache_snooping &&
                dev->ops->dma_no_cache_snooping(dev))
                clflush_cache_range(msg_addr, cl_msg->send_buf.size);

        /* send dma_xfer hbm msg */
        off = msg_addr - (unsigned char *)dev->ishtp_host_dma_tx_buf;
        ishtp_hbm_hdr(&hdr, sizeof(struct dma_xfer_hbm));
        dma_xfer.hbm = DMA_XFER;
        dma_xfer.fw_client_id = cl->fw_client_id;
        dma_xfer.host_client_id = cl->host_client_id;
        dma_xfer.reserved = 0;
        dma_xfer.msg_addr = dev->ishtp_host_dma_tx_buf_phys + off;
        dma_xfer.msg_length = cl_msg->send_buf.size;
        dma_xfer.reserved2 = 0;
        ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
        spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
        list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
        ++cl->tx_ring_free_size;
        spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
        ++cl->send_msg_cnt_dma;
}

/**
 * ishtp_cl_send_msg() - Send message using DMA or IPC
 * @dev: ISHTP device instance
 * @cl: Pointer to client device instance
 *
 * Send message using DMA or IPC based on transfer_path
 */
void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl)
{
        if (dev->transfer_path == CL_TX_PATH_DMA)
                ishtp_cl_send_msg_dma(dev, cl);
        else
                ishtp_cl_send_msg_ipc(dev, cl);
}

/**
 * recv_ishtp_cl_msg() - Receive client message
 * @dev: ISHTP device instance
 * @ishtp_hdr: Pointer to message header
 *
 * Receive and dispatch ISHTP client messages. This function executes in ISR
 * or work queue context
 */
void recv_ishtp_cl_msg(struct ishtp_device *dev,
                       struct ishtp_msg_hdr *ishtp_hdr)
{
        struct ishtp_cl *cl;
        struct ishtp_cl_rb *rb;
        struct ishtp_cl_rb *new_rb;
        unsigned char *buffer = NULL;
        struct ishtp_cl_rb *complete_rb = NULL;
        unsigned long   flags;
        int     rb_count;

        if (ishtp_hdr->reserved) {
                dev_err(dev->devc, "corrupted message header.\n");
                goto    eoi;
        }

        if (ishtp_hdr->length > IPC_PAYLOAD_SIZE) {
                dev_err(dev->devc,
                        "ISHTP message length in hdr exceeds IPC MTU\n");
                goto    eoi;
        }

        spin_lock_irqsave(&dev->read_list_spinlock, flags);
        rb_count = -1;
        list_for_each_entry(rb, &dev->read_list.list, list) {
                ++rb_count;
                cl = rb->cl;
                if (!cl || !(cl->host_client_id == ishtp_hdr->host_addr &&
                                cl->fw_client_id == ishtp_hdr->fw_addr) ||
                                !(cl->state == ISHTP_CL_CONNECTED))
                        continue;

                /* If no Rx buffer is allocated, disband the rb */
                if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
                        spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
                        dev_err(&cl->device->dev,
                                "Rx buffer is not allocated.\n");
                        list_del(&rb->list);
                        ishtp_io_rb_free(rb);
                        cl->status = -ENOMEM;
                        goto    eoi;
                }

                /*
                 * If the message buffer is overflown (exceeds max. client msg
                 * size), drop the message and return the buffer to the free
                 * pool. Do we need to disconnect such a client? (We don't
                 * send back FC, so communication will be stuck anyway)
                 */
                if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {
                        spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
                        dev_err(&cl->device->dev,
                                "message overflow. size %d len %d idx %ld\n",
                                rb->buffer.size, ishtp_hdr->length,
                                rb->buf_idx);
                        list_del(&rb->list);
                        ishtp_cl_io_rb_recycle(rb);
                        cl->status = -EIO;
                        goto    eoi;
                }

                buffer = rb->buffer.data + rb->buf_idx;
                dev->ops->ishtp_read(dev, buffer, ishtp_hdr->length);

                rb->buf_idx += ishtp_hdr->length;
                if (ishtp_hdr->msg_complete) {
                        /* Last fragment in message - it's complete */
                        cl->status = 0;
                        list_del(&rb->list);
                        complete_rb = rb;

                        --cl->out_flow_ctrl_creds;
                        /*
                         * the whole msg arrived, send a new FC, and add a new
                         * rb buffer for the next coming msg
                         */
                        spin_lock(&cl->free_list_spinlock);

                        if (!list_empty(&cl->free_rb_list.list)) {
                                new_rb = list_entry(cl->free_rb_list.list.next,
                                        struct ishtp_cl_rb, list);
                                list_del_init(&new_rb->list);
                                spin_unlock(&cl->free_list_spinlock);
                                new_rb->cl = cl;
                                new_rb->buf_idx = 0;
                                INIT_LIST_HEAD(&new_rb->list);
                                list_add_tail(&new_rb->list,
                                        &dev->read_list.list);

                                ishtp_hbm_cl_flow_control_req(dev, cl);
                        } else {
                                spin_unlock(&cl->free_list_spinlock);
                        }
                }
                /* One more fragment in message (even if this was last) */
                ++cl->recv_msg_num_frags;

                /*
                 * We can safely break here (and in BH too),
                 * a single input message can go only to a single request!
                 */
                break;
        }

        spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
        /* If it's nobody's message, just read and discard it */
        if (!buffer) {
                uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];

                dev_err(dev->devc, "Dropped Rx msg - no request\n");
                dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
                goto    eoi;
        }

        if (complete_rb) {
                cl = complete_rb->cl;
                cl->ts_rx = ktime_get();
                ++cl->recv_msg_cnt_ipc;
                ishtp_cl_read_complete(complete_rb);
        }
eoi:
        return;
}

/**
 * recv_ishtp_cl_msg_dma() - Receive client message
 * @dev: ISHTP device instance
 * @msg: message pointer
 * @hbm: hbm buffer
 *
 * Receive and dispatch ISHTP client messages using DMA. This function executes
 * in ISR or work queue context
 */
void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
                           struct dma_xfer_hbm *hbm)
{
        struct ishtp_cl *cl;
        struct ishtp_cl_rb *rb;
        struct ishtp_cl_rb *new_rb;
        unsigned char *buffer = NULL;
        struct ishtp_cl_rb *complete_rb = NULL;
        unsigned long   flags;

        spin_lock_irqsave(&dev->read_list_spinlock, flags);

        list_for_each_entry(rb, &dev->read_list.list, list) {
                cl = rb->cl;
                if (!cl || !(cl->host_client_id == hbm->host_client_id &&
                                cl->fw_client_id == hbm->fw_client_id) ||
                                !(cl->state == ISHTP_CL_CONNECTED))
                        continue;

                /*
                 * If no Rx buffer is allocated, disband the rb
                 */
                if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
                        spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
                        dev_err(&cl->device->dev,
                                "response buffer is not allocated.\n");
                        list_del(&rb->list);
                        ishtp_io_rb_free(rb);
                        cl->status = -ENOMEM;
                        goto    eoi;
                }

                /*
                 * If the message buffer is overflown (exceeds max. client msg
                 * size), drop the message and return the buffer to the free
                 * pool. Do we need to disconnect such a client? (We don't
                 * send back FC, so communication will be stuck anyway)
                 */
                if (rb->buffer.size < hbm->msg_length) {
                        spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
                        dev_err(&cl->device->dev,
                                "message overflow. size %d len %d idx %ld\n",
                                rb->buffer.size, hbm->msg_length, rb->buf_idx);
                        list_del(&rb->list);
                        ishtp_cl_io_rb_recycle(rb);
                        cl->status = -EIO;
                        goto    eoi;
                }

                buffer = rb->buffer.data;

                /*
                 * If the current fw doesn't support cache snooping, the
                 * driver has to flush the cache manually.
                 */
                if (dev->ops->dma_no_cache_snooping &&
                        dev->ops->dma_no_cache_snooping(dev))
                        clflush_cache_range(msg, hbm->msg_length);

                memcpy(buffer, msg, hbm->msg_length);
                rb->buf_idx = hbm->msg_length;

                /* Last fragment in message - it's complete */
                cl->status = 0;
                list_del(&rb->list);
                complete_rb = rb;

                --cl->out_flow_ctrl_creds;
                /*
                 * the whole msg arrived, send a new FC, and add a new
                 * rb buffer for the next coming msg
                 */
                spin_lock(&cl->free_list_spinlock);

                if (!list_empty(&cl->free_rb_list.list)) {
                        new_rb = list_entry(cl->free_rb_list.list.next,
                                struct ishtp_cl_rb, list);
                        list_del_init(&new_rb->list);
                        spin_unlock(&cl->free_list_spinlock);
                        new_rb->cl = cl;
                        new_rb->buf_idx = 0;
                        INIT_LIST_HEAD(&new_rb->list);
                        list_add_tail(&new_rb->list,
                                &dev->read_list.list);

                        ishtp_hbm_cl_flow_control_req(dev, cl);
                } else {
                        spin_unlock(&cl->free_list_spinlock);
                }

                /* One more fragment in message (this is always last) */
                ++cl->recv_msg_num_frags;

                /*
                 * We can safely break here (and in BH too),
                 * a single input message can go only to a single request!
                 */
                break;
        }

        spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
        /* If it's nobody's message, just read and discard it */
        if (!buffer) {
                dev_err(dev->devc, "Dropped Rx (DMA) msg - no request\n");
                goto    eoi;
        }

        if (complete_rb) {
                cl = complete_rb->cl;
                cl->ts_rx = ktime_get();
                ++cl->recv_msg_cnt_dma;
                ishtp_cl_read_complete(complete_rb);
        }
eoi:
        return;
}

void *ishtp_get_client_data(struct ishtp_cl *cl)
{
        return cl->client_data;
}
EXPORT_SYMBOL(ishtp_get_client_data);

void ishtp_set_client_data(struct ishtp_cl *cl, void *data)
{
        cl->client_data = data;
}
EXPORT_SYMBOL(ishtp_set_client_data);

struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl)
{
        return cl->dev;
}
EXPORT_SYMBOL(ishtp_get_ishtp_device);

void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size)
{
        cl->tx_ring_size = size;
}
EXPORT_SYMBOL(ishtp_set_tx_ring_size);

void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size)
{
        cl->rx_ring_size = size;
}
EXPORT_SYMBOL(ishtp_set_rx_ring_size);

void ishtp_set_connection_state(struct ishtp_cl *cl, int state)
{
        cl->state = state;
}
EXPORT_SYMBOL(ishtp_set_connection_state);

void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id)
{
        cl->fw_client_id = fw_client_id;
}
EXPORT_SYMBOL(ishtp_cl_set_fw_client_id);
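
/*
 * Illustrative sketch (not part of the driver): the accessors above let a
 * bus client stash its per-device context on the ishtp_cl and size the
 * rings before connecting. 'struct example_ctx' and the helper name are
 * hypothetical.
 */
struct example_ctx {
        struct ishtp_cl *cl;
};

static void __maybe_unused example_bind_ctx(struct ishtp_cl *cl,
                                            struct example_ctx *ctx)
{
        ctx->cl = cl;
        ishtp_set_client_data(cl, ctx);
        ishtp_set_rx_ring_size(cl, CL_DEF_RX_RING_SIZE);
        ishtp_set_tx_ring_size(cl, CL_DEF_TX_RING_SIZE);
}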