/* linux/drivers/misc/mei/client.c */
   1/*
   2 *
   3 * Intel Management Engine Interface (Intel MEI) Linux driver
   4 * Copyright (c) 2003-2012, Intel Corporation.
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms and conditions of the GNU General Public License,
   8 * version 2, as published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope it will be useful, but WITHOUT
  11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13 * more details.
  14 *
  15 */
  16
  17#include <linux/sched/signal.h>
  18#include <linux/wait.h>
  19#include <linux/delay.h>
  20#include <linux/slab.h>
  21#include <linux/pm_runtime.h>
  22
  23#include <linux/mei.h>
  24
  25#include "mei_dev.h"
  26#include "hbm.h"
  27#include "client.h"
  28
  29/**
  30 * mei_me_cl_init - initialize me client
  31 *
  32 * @me_cl: me client
  33 */
  34void mei_me_cl_init(struct mei_me_client *me_cl)
  35{
  36        INIT_LIST_HEAD(&me_cl->list);
  37        kref_init(&me_cl->refcnt);
  38}
  39
  40/**
  41 * mei_me_cl_get - increases me client refcount
  42 *
  43 * @me_cl: me client
  44 *
  45 * Locking: called under "dev->device_lock" lock
  46 *
  47 * Return: me client or NULL
  48 */
  49struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
  50{
  51        if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
  52                return me_cl;
  53
  54        return NULL;
  55}
  56
  57/**
  58 * mei_me_cl_release - free me client
  59 *
  60 * Locking: called under "dev->device_lock" lock
  61 *
  62 * @ref: me_client refcount
  63 */
  64static void mei_me_cl_release(struct kref *ref)
  65{
  66        struct mei_me_client *me_cl =
  67                container_of(ref, struct mei_me_client, refcnt);
  68
  69        kfree(me_cl);
  70}
  71
  72/**
  73 * mei_me_cl_put - decrease me client refcount and free client if necessary
  74 *
  75 * Locking: called under "dev->device_lock" lock
  76 *
  77 * @me_cl: me client
  78 */
  79void mei_me_cl_put(struct mei_me_client *me_cl)
  80{
  81        if (me_cl)
  82                kref_put(&me_cl->refcnt, mei_me_cl_release);
  83}
  84
  85/**
  86 * __mei_me_cl_del  - delete me client from the list and decrease
  87 *     reference counter
  88 *
  89 * @dev: mei device
  90 * @me_cl: me client
  91 *
  92 * Locking: dev->me_clients_rwsem
  93 */
  94static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
  95{
  96        if (!me_cl)
  97                return;
  98
  99        list_del_init(&me_cl->list);
 100        mei_me_cl_put(me_cl);
 101}
 102
 103/**
 104 * mei_me_cl_del - delete me client from the list and decrease
 105 *     reference counter
 106 *
 107 * @dev: mei device
 108 * @me_cl: me client
 109 */
 110void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
 111{
 112        down_write(&dev->me_clients_rwsem);
 113        __mei_me_cl_del(dev, me_cl);
 114        up_write(&dev->me_clients_rwsem);
 115}
 116
 117/**
 118 * mei_me_cl_add - add me client to the list
 119 *
 120 * @dev: mei device
 121 * @me_cl: me client
 122 */
 123void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
 124{
 125        down_write(&dev->me_clients_rwsem);
 126        list_add(&me_cl->list, &dev->me_clients);
 127        up_write(&dev->me_clients_rwsem);
 128}
 129
 130/**
 131 * __mei_me_cl_by_uuid - locate me client by uuid
 132 *      increases ref count
 133 *
 134 * @dev: mei device
 135 * @uuid: me client uuid
 136 *
 137 * Return: me client or NULL if not found
 138 *
 139 * Locking: dev->me_clients_rwsem
 140 */
 141static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
 142                                        const uuid_le *uuid)
 143{
 144        struct mei_me_client *me_cl;
 145        const uuid_le *pn;
 146
 147        WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
 148
 149        list_for_each_entry(me_cl, &dev->me_clients, list) {
 150                pn = &me_cl->props.protocol_name;
 151                if (uuid_le_cmp(*uuid, *pn) == 0)
 152                        return mei_me_cl_get(me_cl);
 153        }
 154
 155        return NULL;
 156}
 157
 158/**
 159 * mei_me_cl_by_uuid - locate me client by uuid
 160 *      increases ref count
 161 *
 162 * @dev: mei device
 163 * @uuid: me client uuid
 164 *
 165 * Return: me client or NULL if not found
 166 *
 167 * Locking: dev->me_clients_rwsem
 168 */
 169struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
 170                                        const uuid_le *uuid)
 171{
 172        struct mei_me_client *me_cl;
 173
 174        down_read(&dev->me_clients_rwsem);
 175        me_cl = __mei_me_cl_by_uuid(dev, uuid);
 176        up_read(&dev->me_clients_rwsem);
 177
 178        return me_cl;
 179}
 180
 181/**
 182 * mei_me_cl_by_id - locate me client by client id
 183 *      increases ref count
 184 *
 185 * @dev: the device structure
 186 * @client_id: me client id
 187 *
 188 * Return: me client or NULL if not found
 189 *
 190 * Locking: dev->me_clients_rwsem
 191 */
 192struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
 193{
 194
 195        struct mei_me_client *__me_cl, *me_cl = NULL;
 196
 197        down_read(&dev->me_clients_rwsem);
 198        list_for_each_entry(__me_cl, &dev->me_clients, list) {
 199                if (__me_cl->client_id == client_id) {
 200                        me_cl = mei_me_cl_get(__me_cl);
 201                        break;
 202                }
 203        }
 204        up_read(&dev->me_clients_rwsem);
 205
 206        return me_cl;
 207}
 208
 209/**
 210 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
 211 *      increases ref count
 212 *
 213 * @dev: the device structure
 214 * @uuid: me client uuid
 215 * @client_id: me client id
 216 *
 217 * Return: me client or null if not found
 218 *
 219 * Locking: dev->me_clients_rwsem
 220 */
 221static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
 222                                           const uuid_le *uuid, u8 client_id)
 223{
 224        struct mei_me_client *me_cl;
 225        const uuid_le *pn;
 226
 227        WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
 228
 229        list_for_each_entry(me_cl, &dev->me_clients, list) {
 230                pn = &me_cl->props.protocol_name;
 231                if (uuid_le_cmp(*uuid, *pn) == 0 &&
 232                    me_cl->client_id == client_id)
 233                        return mei_me_cl_get(me_cl);
 234        }
 235
 236        return NULL;
 237}
 238
 239
 240/**
 241 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
 242 *      increases ref count
 243 *
 244 * @dev: the device structure
 245 * @uuid: me client uuid
 246 * @client_id: me client id
 247 *
 248 * Return: me client or null if not found
 249 */
 250struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
 251                                           const uuid_le *uuid, u8 client_id)
 252{
 253        struct mei_me_client *me_cl;
 254
 255        down_read(&dev->me_clients_rwsem);
 256        me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
 257        up_read(&dev->me_clients_rwsem);
 258
 259        return me_cl;
 260}
 261
 262/**
 263 * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
 264 *
 265 * @dev: the device structure
 266 * @uuid: me client uuid
 267 *
 268 * Locking: called under "dev->device_lock" lock
 269 */
 270void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
 271{
 272        struct mei_me_client *me_cl;
 273
 274        dev_dbg(dev->dev, "remove %pUl\n", uuid);
 275
 276        down_write(&dev->me_clients_rwsem);
 277        me_cl = __mei_me_cl_by_uuid(dev, uuid);
 278        __mei_me_cl_del(dev, me_cl);
 279        up_write(&dev->me_clients_rwsem);
 280}
 281
 282/**
 283 * mei_me_cl_rm_by_uuid_id - remove all me clients matching client id
 284 *
 285 * @dev: the device structure
 286 * @uuid: me client uuid
 287 * @id: me client id
 288 *
 289 * Locking: called under "dev->device_lock" lock
 290 */
 291void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
 292{
 293        struct mei_me_client *me_cl;
 294
 295        dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);
 296
 297        down_write(&dev->me_clients_rwsem);
 298        me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
 299        __mei_me_cl_del(dev, me_cl);
 300        up_write(&dev->me_clients_rwsem);
 301}
 302
 303/**
 304 * mei_me_cl_rm_all - remove all me clients
 305 *
 306 * @dev: the device structure
 307 *
 308 * Locking: called under "dev->device_lock" lock
 309 */
 310void mei_me_cl_rm_all(struct mei_device *dev)
 311{
 312        struct mei_me_client *me_cl, *next;
 313
 314        down_write(&dev->me_clients_rwsem);
 315        list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
 316                __mei_me_cl_del(dev, me_cl);
 317        up_write(&dev->me_clients_rwsem);
 318}
 319
 320/**
 321 * mei_cl_cmp_id - tells if the clients are the same
 322 *
 323 * @cl1: host client 1
 324 * @cl2: host client 2
 325 *
 326 * Return: true  - if the clients has same host and me ids
 327 *         false - otherwise
 328 */
 329static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
 330                                const struct mei_cl *cl2)
 331{
 332        return cl1 && cl2 &&
 333                (cl1->host_client_id == cl2->host_client_id) &&
 334                (mei_cl_me_id(cl1) == mei_cl_me_id(cl2));
 335}
 336
 337/**
 338 * mei_io_cb_free - free mei_cb_private related memory
 339 *
 340 * @cb: mei callback struct
 341 */
 342void mei_io_cb_free(struct mei_cl_cb *cb)
 343{
 344        if (cb == NULL)
 345                return;
 346
 347        list_del(&cb->list);
 348        kfree(cb->buf.data);
 349        kfree(cb);
 350}
 351
 352/**
 353 * mei_io_cb_init - allocate and initialize io callback
 354 *
 355 * @cl: mei client
 356 * @type: operation type
 357 * @fp: pointer to file structure
 358 *
 359 * Return: mei_cl_cb pointer or NULL;
 360 */
 361static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
 362                                        enum mei_cb_file_ops type,
 363                                        const struct file *fp)
 364{
 365        struct mei_cl_cb *cb;
 366
 367        cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
 368        if (!cb)
 369                return NULL;
 370
 371        INIT_LIST_HEAD(&cb->list);
 372        cb->fp = fp;
 373        cb->cl = cl;
 374        cb->buf_idx = 0;
 375        cb->fop_type = type;
 376        return cb;
 377}
 378
 379/**
 380 * __mei_io_list_flush_cl - removes and frees cbs belonging to cl.
 381 *
 382 * @head:  an instance of our list structure
 383 * @cl:    host client, can be NULL for flushing the whole list
 384 * @free:  whether to free the cbs
 385 */
 386static void __mei_io_list_flush_cl(struct list_head *head,
 387                                   const struct mei_cl *cl, bool free)
 388{
 389        struct mei_cl_cb *cb, *next;
 390
 391        /* enable removing everything if no cl is specified */
 392        list_for_each_entry_safe(cb, next, head, list) {
 393                if (!cl || mei_cl_cmp_id(cl, cb->cl)) {
 394                        list_del_init(&cb->list);
 395                        if (free)
 396                                mei_io_cb_free(cb);
 397                }
 398        }
 399}
 400
 401/**
 402 * mei_io_list_flush_cl - removes list entry belonging to cl.
 403 *
 404 * @head: An instance of our list structure
 405 * @cl: host client
 406 */
 407static inline void mei_io_list_flush_cl(struct list_head *head,
 408                                        const struct mei_cl *cl)
 409{
 410        __mei_io_list_flush_cl(head, cl, false);
 411}
 412
 413/**
 414 * mei_io_list_free_cl - removes cb belonging to cl and free them
 415 *
 416 * @head: An instance of our list structure
 417 * @cl: host client
 418 */
 419static inline void mei_io_list_free_cl(struct list_head *head,
 420                                       const struct mei_cl *cl)
 421{
 422        __mei_io_list_flush_cl(head, cl, true);
 423}
 424
 425/**
 426 * mei_io_list_free_fp - free cb from a list that matches file pointer
 427 *
 428 * @head: io list
 429 * @fp: file pointer (matching cb file object), may be NULL
 430 */
 431static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
 432{
 433        struct mei_cl_cb *cb, *next;
 434
 435        list_for_each_entry_safe(cb, next, head, list)
 436                if (!fp || fp == cb->fp)
 437                        mei_io_cb_free(cb);
 438}
 439
 440/**
 441 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
 442 *
 443 * @cl: host client
 444 * @length: size of the buffer
 445 * @fop_type: operation type
 446 * @fp: associated file pointer (might be NULL)
 447 *
 448 * Return: cb on success and NULL on failure
 449 */
 450struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
 451                                  enum mei_cb_file_ops fop_type,
 452                                  const struct file *fp)
 453{
 454        struct mei_cl_cb *cb;
 455
 456        cb = mei_io_cb_init(cl, fop_type, fp);
 457        if (!cb)
 458                return NULL;
 459
 460        if (length == 0)
 461                return cb;
 462
 463        cb->buf.data = kmalloc(length, GFP_KERNEL);
 464        if (!cb->buf.data) {
 465                mei_io_cb_free(cb);
 466                return NULL;
 467        }
 468        cb->buf.size = length;
 469
 470        return cb;
 471}
 472
 473/**
 474 * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
 475 *     and enqueuing of the control commands cb
 476 *
 477 * @cl: host client
 478 * @length: size of the buffer
 479 * @fop_type: operation type
 480 * @fp: associated file pointer (might be NULL)
 481 *
 482 * Return: cb on success and NULL on failure
 483 * Locking: called under "dev->device_lock" lock
 484 */
 485struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
 486                                            enum mei_cb_file_ops fop_type,
 487                                            const struct file *fp)
 488{
 489        struct mei_cl_cb *cb;
 490
 491        /* for RX always allocate at least client's mtu */
 492        if (length)
 493                length = max_t(size_t, length, mei_cl_mtu(cl));
 494
 495        cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
 496        if (!cb)
 497                return NULL;
 498
 499        list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
 500        return cb;
 501}
 502
 503/**
 504 * mei_cl_read_cb - find this cl's callback in the read list
 505 *     for a specific file
 506 *
 507 * @cl: host client
 508 * @fp: file pointer (matching cb file object), may be NULL
 509 *
 510 * Return: cb on success, NULL if cb is not found
 511 */
 512struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
 513{
 514        struct mei_cl_cb *cb;
 515
 516        list_for_each_entry(cb, &cl->rd_completed, list)
 517                if (!fp || fp == cb->fp)
 518                        return cb;
 519
 520        return NULL;
 521}
 522
 523/**
 524 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 525 *
 526 * @cl: host client
 527 * @fp: file pointer (matching cb file object), may be NULL
 528 *
 529 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
 530 */
 531int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
 532{
 533        struct mei_device *dev;
 534
 535        if (WARN_ON(!cl || !cl->dev))
 536                return -EINVAL;
 537
 538        dev = cl->dev;
 539
 540        cl_dbg(dev, cl, "remove list entry belonging to cl\n");
 541        mei_io_list_free_cl(&cl->dev->write_list, cl);
 542        mei_io_list_free_cl(&cl->dev->write_waiting_list, cl);
 543        mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
 544        mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
 545        mei_io_list_free_fp(&cl->rd_pending, fp);
 546        mei_io_list_free_fp(&cl->rd_completed, fp);
 547
 548        return 0;
 549}
 550
 551/**
 552 * mei_cl_init - initializes cl.
 553 *
 554 * @cl: host client to be initialized
 555 * @dev: mei device
 556 */
 557static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
 558{
 559        memset(cl, 0, sizeof(struct mei_cl));
 560        init_waitqueue_head(&cl->wait);
 561        init_waitqueue_head(&cl->rx_wait);
 562        init_waitqueue_head(&cl->tx_wait);
 563        init_waitqueue_head(&cl->ev_wait);
 564        INIT_LIST_HEAD(&cl->rd_completed);
 565        INIT_LIST_HEAD(&cl->rd_pending);
 566        INIT_LIST_HEAD(&cl->link);
 567        cl->writing_state = MEI_IDLE;
 568        cl->state = MEI_FILE_UNINITIALIZED;
 569        cl->dev = dev;
 570}
 571
 572/**
 573 * mei_cl_allocate - allocates cl  structure and sets it up.
 574 *
 575 * @dev: mei device
 576 * Return:  The allocated file or NULL on failure
 577 */
 578struct mei_cl *mei_cl_allocate(struct mei_device *dev)
 579{
 580        struct mei_cl *cl;
 581
 582        cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
 583        if (!cl)
 584                return NULL;
 585
 586        mei_cl_init(cl, dev);
 587
 588        return cl;
 589}
 590
/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 *
 * Return: 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded.
 */
int mei_cl_link(struct mei_cl *cl)
{
	struct mei_device *dev;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	/* pick the lowest free host client id from the bitmap */
	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(dev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	/* both checks passed: commit the handle count, bitmap bit and
	 * file list entry together
	 */
	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}
 634
/**
 * mei_cl_unlink - remove host client from the list
 *
 * Undoes mei_cl_link(): releases the host client id, drops the open
 * handle count and removes the client from the device file list.
 *
 * @cl: host client
 *
 * Return: always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	if (WARN_ON(!cl->dev))
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_UNINITIALIZED;
	cl->writing_state = MEI_IDLE;

	/* by this point all queued io for the client must be gone */
	WARN_ON(!list_empty(&cl->rd_completed) ||
		!list_empty(&cl->rd_pending) ||
		!list_empty(&cl->link));

	return 0;
}
 675
/**
 * mei_host_client_init - mark the device enabled, reset the reset
 *     counter, schedule a bus rescan and request runtime autosuspend
 *
 * @dev: the device structure
 */
void mei_host_client_init(struct mei_device *dev)
{
	dev->dev_state = MEI_DEV_ENABLED;
	dev->reset_count = 0;

	schedule_work(&dev->bus_rescan_work);

	pm_runtime_mark_last_busy(dev->dev);
	dev_dbg(dev->dev, "rpm: autosuspend\n");
	pm_request_autosuspend(dev->dev);
}
 687
 688/**
 689 * mei_hbuf_acquire - try to acquire host buffer
 690 *
 691 * @dev: the device structure
 692 * Return: true if host buffer was acquired
 693 */
 694bool mei_hbuf_acquire(struct mei_device *dev)
 695{
 696        if (mei_pg_state(dev) == MEI_PG_ON ||
 697            mei_pg_in_transition(dev)) {
 698                dev_dbg(dev->dev, "device is in pg\n");
 699                return false;
 700        }
 701
 702        if (!dev->hbuf_is_ready) {
 703                dev_dbg(dev->dev, "hbuf is not ready\n");
 704                return false;
 705        }
 706
 707        dev->hbuf_is_ready = false;
 708
 709        return true;
 710}
 711
/**
 * mei_cl_wake_all - wake up readers, writers and event waiters so
 *                 they can be interrupted
 *
 * @cl: host client
 */
static void mei_cl_wake_all(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* synchronized under device mutex */
	if (waitqueue_active(&cl->rx_wait)) {
		cl_dbg(dev, cl, "Waking up reading client!\n");
		wake_up_interruptible(&cl->rx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->tx_wait)) {
		cl_dbg(dev, cl, "Waking up writing client!\n");
		wake_up_interruptible(&cl->tx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->ev_wait)) {
		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
		wake_up_interruptible(&cl->ev_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->wait)) {
		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
		/* non-interruptible: control waiters use wait_event_timeout */
		wake_up(&cl->wait);
	}
}
 743
/**
 * mei_cl_set_disconnected - set disconnected state and clear
 *   associated states and resources
 *
 * @cl: host client
 */
static void mei_cl_set_disconnected(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* nothing to do if never connected or already disconnected */
	if (cl->state == MEI_FILE_DISCONNECTED ||
	    cl->state <= MEI_FILE_INITIALIZING)
		return;

	cl->state = MEI_FILE_DISCONNECTED;
	/* drop all pending io before waking any waiters */
	mei_io_list_free_cl(&dev->write_list, cl);
	mei_io_list_free_cl(&dev->write_waiting_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
	mei_cl_wake_all(cl);
	cl->rx_flow_ctrl_creds = 0;
	cl->tx_flow_ctrl_creds = 0;
	cl->timer_count = 0;

	mei_cl_bus_module_put(cl);

	if (!cl->me_cl)
		return;

	/* balance the connect_count taken in mei_cl_set_connecting() */
	if (!WARN_ON(cl->me_cl->connect_count == 0))
		cl->me_cl->connect_count--;

	/* reset tx credits once the last connection is gone */
	if (cl->me_cl->connect_count == 0)
		cl->me_cl->tx_flow_ctrl_creds = 0;

	mei_me_cl_put(cl->me_cl);
	cl->me_cl = NULL;
}
 782
/**
 * mei_cl_set_connecting - take a reference on the me client and move
 *     the host client into connecting state
 *
 * @cl: host client
 * @me_cl: me client to connect to
 *
 * Return: 0 on success, -ENOENT when the me client reference is gone,
 *	-EBUSY when a fixed address client already has a connection
 */
static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
{
	if (!mei_me_cl_get(me_cl))
		return -ENOENT;

	/* only one connection is allowed for fixed address clients */
	if (me_cl->props.fixed_address) {
		if (me_cl->connect_count) {
			/* drop the reference taken above */
			mei_me_cl_put(me_cl);
			return -EBUSY;
		}
	}

	/* reference taken above is kept in cl->me_cl until disconnect */
	cl->me_cl = me_cl;
	cl->state = MEI_FILE_CONNECTING;
	cl->me_cl->connect_count++;

	return 0;
}
 802
/**
 * mei_cl_send_disconnect - send disconnect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_disconnect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	/* request is on the wire: park the cb on the read list to wait
	 * for the firmware reply and arm the stall timer
	 */
	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);

	return 0;
}
 831
 832/**
 833 * mei_cl_irq_disconnect - processes close related operation from
 834 *      interrupt thread context - send disconnect request
 835 *
 836 * @cl: client
 837 * @cb: callback block.
 838 * @cmpl_list: complete list.
 839 *
 840 * Return: 0, OK; otherwise, error.
 841 */
 842int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
 843                          struct list_head *cmpl_list)
 844{
 845        struct mei_device *dev = cl->dev;
 846        u32 msg_slots;
 847        int slots;
 848        int ret;
 849
 850        msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
 851        slots = mei_hbuf_empty_slots(dev);
 852
 853        if (slots < msg_slots)
 854                return -EMSGSIZE;
 855
 856        ret = mei_cl_send_disconnect(cl, cb);
 857        if (ret)
 858                list_move_tail(&cb->list, cmpl_list);
 859
 860        return ret;
 861}
 862
/**
 * __mei_cl_disconnect - disconnect host client from the me one
 *     internal function runtime pm has to be already acquired
 *
 * @cl: host client
 *
 * Return: 0 on success, <0 on failure.
 */
static int __mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	dev = cl->dev;

	cl->state = MEI_FILE_DISCONNECTING;

	/* queue the request on the control write list */
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* if the host buffer is free, send immediately; otherwise the
	 * queued cb is picked up later by the interrupt write path
	 */
	if (mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_disconnect(cl, cb);
		if (rets) {
			cl_err(dev, cl, "failed to disconnect.\n");
			goto out;
		}
	}

	/* drop the device lock while waiting for the firmware reply */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->state == MEI_FILE_DISCONNECT_REPLY ||
			   cl->state == MEI_FILE_DISCONNECTED,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	rets = cl->status;
	if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
	    cl->state != MEI_FILE_DISCONNECTED) {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

out:
	/* we disconnect also on error */
	mei_cl_set_disconnected(cl);
	if (!rets)
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");

	mei_io_cb_free(cb);
	return rets;
}
 918
/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	if (!mei_cl_is_connected(cl))
		return 0;

	/* fixed address clients have no HBM connection to tear down */
	if (mei_cl_is_fixed_address(cl)) {
		mei_cl_set_disconnected(cl);
		return 0;
	}

	if (dev->dev_state == MEI_DEV_POWER_DOWN) {
		cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
		mei_cl_set_disconnected(cl);
		return 0;
	}

	/* keep the device awake for the disconnect handshake */
	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = __mei_cl_disconnect(cl);

	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return rets;
}
 969
 970
 971/**
 972 * mei_cl_is_other_connecting - checks if other
 973 *    client with the same me client id is connecting
 974 *
 975 * @cl: private data of the file object
 976 *
 977 * Return: true if other client is connected, false - otherwise.
 978 */
 979static bool mei_cl_is_other_connecting(struct mei_cl *cl)
 980{
 981        struct mei_device *dev;
 982        struct mei_cl_cb *cb;
 983
 984        dev = cl->dev;
 985
 986        list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
 987                if (cb->fop_type == MEI_FOP_CONNECT &&
 988                    mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
 989                        return true;
 990        }
 991
 992        return false;
 993}
 994
/**
 * mei_cl_send_connect - send connect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_connect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	/* request is on the wire: park the cb on the read list to wait
	 * for the firmware reply and arm the stall timer
	 */
	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);
	return 0;
}
1022
1023/**
1024 * mei_cl_irq_connect - send connect request in irq_thread context
1025 *
1026 * @cl: host client
1027 * @cb: callback block
1028 * @cmpl_list: complete list
1029 *
1030 * Return: 0, OK; otherwise, error.
1031 */
1032int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
1033                       struct list_head *cmpl_list)
1034{
1035        struct mei_device *dev = cl->dev;
1036        u32 msg_slots;
1037        int slots;
1038        int rets;
1039
1040        msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
1041        slots = mei_hbuf_empty_slots(dev);
1042
1043        if (mei_cl_is_other_connecting(cl))
1044                return 0;
1045
1046        if (slots < msg_slots)
1047                return -EMSGSIZE;
1048
1049        rets = mei_cl_send_connect(cl, cb);
1050        if (rets)
1051                list_move_tail(&cb->list, cmpl_list);
1052
1053        return rets;
1054}
1055
1056/**
1057 * mei_cl_connect - connect host client to the me one
1058 *
1059 * @cl: host client
1060 * @me_cl: me client
1061 * @fp: pointer to file structure
1062 *
1063 * Locking: called under "dev->device_lock" lock
1064 *
1065 * Return: 0 on success, <0 on failure.
1066 */
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
		   const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev || !me_cl))
		return -ENODEV;

	dev = cl->dev;

	/* pin the bus module for the lifetime of the connection */
	if (!mei_cl_bus_module_get(cl))
		return -ENODEV;

	rets = mei_cl_set_connecting(cl, me_cl);
	if (rets)
		goto nortpm;

	/* fixed-address clients connect without an HBM handshake */
	if (mei_cl_is_fixed_address(cl)) {
		cl->state = MEI_FILE_CONNECTED;
		rets = 0;
		goto nortpm;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	/* queue the connect cb; the irq thread sends it if we cannot */
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_connect(cl, cb);
		if (rets)
			goto out;
	}

	/* drop the device lock while waiting for the firmware reply */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			(cl->state == MEI_FILE_CONNECTED ||
			 cl->state == MEI_FILE_DISCONNECTED ||
			 cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
			 cl->state == MEI_FILE_DISCONNECT_REPLY),
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
			/* drop any control requests still queued for us */
			mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
			mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
			 /* ignore disconnect return value;
			  * in case of failure reset will be invoked
			  */
			__mei_cl_disconnect(cl);
			rets = -EFAULT;
			goto out;
		}

		/* timeout or something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;
	}

	rets = cl->status;
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	/* cb is NULL on the allocation-failure path;
	 * mei_io_cb_free() presumably tolerates that — verify
	 */
	mei_io_cb_free(cb);

nortpm:
	if (!mei_cl_is_connected(cl))
		mei_cl_set_disconnected(cl);

	return rets;
}
1152
1153/**
1154 * mei_cl_alloc_linked - allocate and link host client
1155 *
1156 * @dev: the device structure
1157 *
1158 * Return: cl on success ERR_PTR on failure
1159 */
1160struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
1161{
1162        struct mei_cl *cl;
1163        int ret;
1164
1165        cl = mei_cl_allocate(dev);
1166        if (!cl) {
1167                ret = -ENOMEM;
1168                goto err;
1169        }
1170
1171        ret = mei_cl_link(cl);
1172        if (ret)
1173                goto err;
1174
1175        return cl;
1176err:
1177        kfree(cl);
1178        return ERR_PTR(ret);
1179}
1180
1181/**
1182 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
1183 *
1184 * @cl: host client
1185 *
1186 * Return: 1 if tx_flow_ctrl_creds >0, 0 - otherwise.
1187 */
1188static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
1189{
1190        if (WARN_ON(!cl || !cl->me_cl))
1191                return -EINVAL;
1192
1193        if (cl->tx_flow_ctrl_creds > 0)
1194                return 1;
1195
1196        if (mei_cl_is_fixed_address(cl))
1197                return 1;
1198
1199        if (mei_cl_is_single_recv_buf(cl)) {
1200                if (cl->me_cl->tx_flow_ctrl_creds > 0)
1201                        return 1;
1202        }
1203        return 0;
1204}
1205
1206/**
1207 * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
1208 *   for a client
1209 *
1210 * @cl: host client
1211 *
1212 * Return:
1213 *      0 on success
1214 *      -EINVAL when ctrl credits are <= 0
1215 */
1216static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
1217{
1218        if (WARN_ON(!cl || !cl->me_cl))
1219                return -EINVAL;
1220
1221        if (mei_cl_is_fixed_address(cl))
1222                return 0;
1223
1224        if (mei_cl_is_single_recv_buf(cl)) {
1225                if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
1226                        return -EINVAL;
1227                cl->me_cl->tx_flow_ctrl_creds--;
1228        } else {
1229                if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
1230                        return -EINVAL;
1231                cl->tx_flow_ctrl_creds--;
1232        }
1233        return 0;
1234}
1235
1236/**
1237 *  mei_cl_notify_fop2req - convert fop to proper request
1238 *
1239 * @fop: client notification start response command
1240 *
1241 * Return:  MEI_HBM_NOTIFICATION_START/STOP
1242 */
1243u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
1244{
1245        if (fop == MEI_FOP_NOTIFY_START)
1246                return MEI_HBM_NOTIFICATION_START;
1247        else
1248                return MEI_HBM_NOTIFICATION_STOP;
1249}
1250
1251/**
 *  mei_cl_notify_req2fop - convert notification request to file operation type
1253 *
1254 * @req: hbm notification request type
1255 *
1256 * Return:  MEI_FOP_NOTIFY_START/STOP
1257 */
1258enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
1259{
1260        if (req == MEI_HBM_NOTIFICATION_START)
1261                return MEI_FOP_NOTIFY_START;
1262        else
1263                return MEI_FOP_NOTIFY_STOP;
1264}
1265
1266/**
1267 * mei_cl_irq_notify - send notification request in irq_thread context
1268 *
1269 * @cl: client
1270 * @cb: callback block.
1271 * @cmpl_list: complete list.
1272 *
 * Return: 0 on success and error otherwise.
1274 */
1275int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
1276                      struct list_head *cmpl_list)
1277{
1278        struct mei_device *dev = cl->dev;
1279        u32 msg_slots;
1280        int slots;
1281        int ret;
1282        bool request;
1283
1284        msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
1285        slots = mei_hbuf_empty_slots(dev);
1286
1287        if (slots < msg_slots)
1288                return -EMSGSIZE;
1289
1290        request = mei_cl_notify_fop2req(cb->fop_type);
1291        ret = mei_hbm_cl_notify_req(dev, cl, request);
1292        if (ret) {
1293                cl->status = ret;
1294                list_move_tail(&cb->list, cmpl_list);
1295                return ret;
1296        }
1297
1298        list_move_tail(&cb->list, &dev->ctrl_rd_list);
1299        return 0;
1300}
1301
1302/**
1303 * mei_cl_notify_request - send notification stop/start request
1304 *
1305 * @cl: host client
1306 * @fp: associate request with file
1307 * @request: 1 for start or 0 for stop
1308 *
1309 * Locking: called under "dev->device_lock" lock
1310 *
 * Return: 0 on success and error otherwise.
1312 */
int mei_cl_notify_request(struct mei_cl *cl,
			  const struct file *fp, u8 request)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	enum mei_cb_file_ops fop_type;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	/* queue the start/stop request on the control write list */
	fop_type = mei_cl_notify_req2fop(request);
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* send immediately if the host buffer is free; otherwise the
	 * irq thread picks the cb up from the control write list
	 */
	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_notify_req(dev, cl, request)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	/* drop the device lock while waiting for the firmware reply */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->notify_en == request || !mei_cl_is_connected(cl),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	/* timed out without the enable state flipping: report a fault */
	if (cl->notify_en != request && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	/* cb is NULL on the allocation-failure path */
	mei_io_cb_free(cb);
	return rets;
}
1375
1376/**
1377 * mei_cl_notify - raise notification
1378 *
1379 * @cl: host client
1380 *
1381 * Locking: called under "dev->device_lock" lock
1382 */
1383void mei_cl_notify(struct mei_cl *cl)
1384{
1385        struct mei_device *dev;
1386
1387        if (!cl || !cl->dev)
1388                return;
1389
1390        dev = cl->dev;
1391
1392        if (!cl->notify_en)
1393                return;
1394
1395        cl_dbg(dev, cl, "notify event");
1396        cl->notify_ev = true;
1397        if (!mei_cl_bus_notify_event(cl))
1398                wake_up_interruptible(&cl->ev_wait);
1399
1400        if (cl->ev_async)
1401                kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
1402
1403}
1404
1405/**
1406 * mei_cl_notify_get - get or wait for notification event
1407 *
1408 * @cl: host client
1409 * @block: this request is blocking
1410 * @notify_ev: true if notification event was received
1411 *
1412 * Locking: called under "dev->device_lock" lock
1413 *
 * Return: 0 on success and error otherwise.
1415 */
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
{
	struct mei_device *dev;
	int rets;

	*notify_ev = false;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	/* an event is already pending: consume it without sleeping */
	if (cl->notify_ev)
		goto out;

	if (!block)
		return -EAGAIN;

	/* drop the device lock while sleeping for the event */
	mutex_unlock(&dev->device_lock);
	rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
	mutex_lock(&dev->device_lock);

	/* -ERESTARTSYS when interrupted by a signal */
	if (rets < 0)
		return rets;

out:
	/* report and clear the pending event */
	*notify_ev = cl->notify_ev;
	cl->notify_ev = false;
	return 0;
}
1454
1455/**
1456 * mei_cl_read_start - the start read client message function.
1457 *
1458 * @cl: host client
1459 * @length: number of bytes to read
1460 * @fp: pointer to file structure
1461 *
1462 * Return: 0 on success, <0 on failure.
1463 */
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (!mei_me_cl_is_active(cl->me_cl)) {
		cl_err(dev, cl, "no such me client\n");
		return  -ENOTTY;
	}

	/* fixed-address clients receive without credits or a read cb */
	if (mei_cl_is_fixed_address(cl))
		return 0;

	/* HW currently supports only one pending read */
	if (cl->rx_flow_ctrl_creds)
		return -EBUSY;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
	if (!cb)
		return -ENOMEM;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	rets = 0;
	/* grant the firmware a receive credit now if the host buffer
	 * is available; otherwise the irq thread does it later
	 */
	if (mei_hbuf_acquire(dev)) {
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_move_tail(&cb->list, &cl->rd_pending);
	}
	/* account the single outstanding read */
	cl->rx_flow_ctrl_creds++;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
nortpm:
	/* on any failure the cb never reaches a list: free it here */
	if (rets)
		mei_io_cb_free(cb);

	return rets;
}
1521
1522/**
1523 * mei_cl_irq_write - write a message to device
1524 *      from the interrupt thread context
1525 *
1526 * @cl: client
1527 * @cb: callback block.
1528 * @cmpl_list: complete list.
1529 *
1530 * Return: 0, OK; otherwise error.
1531 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct list_head *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	size_t len;
	u32 msg_slots;
	int slots;
	int rets;
	bool first_chunk;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->buf;

	/* buf_idx == 0 means nothing of this cb was sent yet */
	first_chunk = cb->buf_idx == 0;

	/* a tx flow control credit is consumed once per message,
	 * on its first chunk only
	 */
	rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
	if (rets < 0)
		goto err;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	slots = mei_hbuf_empty_slots(dev);
	len = buf->size - cb->buf_idx;
	msg_slots = mei_data2slots(len);

	mei_hdr.host_addr = mei_cl_host_addr(cl);
	mei_hdr.me_addr = mei_cl_me_id(cl);
	mei_hdr.reserved = 0;
	mei_hdr.internal = cb->internal;

	if (slots >= msg_slots) {
		/* the remainder fits: send it as the final chunk */
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	/* Split the message only if we can write the whole host buffer */
	} else if (slots == dev->hbuf_depth) {
		msg_slots = slots;
		len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
		mei_hdr.length = len;
		mei_hdr.msg_complete = 0;
	} else {
		/* wait for next time the host buffer is empty */
		return 0;
	}

	cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n",
			cb->buf.size, cb->buf_idx);

	rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
	if (rets)
		goto err;

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	/* advance past the chunk just written */
	cb->buf_idx += mei_hdr.length;
	cb->completed = mei_hdr.msg_complete == 1;

	/* debit the credit only after the first chunk went out */
	if (first_chunk) {
		if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
			rets = -EIO;
			goto err;
		}
	}

	/* whole message queued: park the cb until the firmware acks it */
	if (mei_hdr.msg_complete)
		list_move_tail(&cb->list, &dev->write_waiting_list);

	return 0;

err:
	cl->status = rets;
	list_move_tail(&cb->list, cmpl_list);
	return rets;
}
1614
1615/**
1616 * mei_cl_write - submit a write cb to mei device
1617 *      assumes device_lock is locked
1618 *
1619 * @cl: host client
1620 * @cb: write callback with filled data
1621 *
1622 * Return: number of bytes sent on success, <0 on failure.
1623 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	int size;
	int rets;
	bool blocking;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->buf;
	size = buf->size;
	blocking = cb->blocking;

	cl_dbg(dev, cl, "size=%d\n", size);

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto free;
	}

	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	mei_hdr.host_addr = mei_cl_host_addr(cl);
	mei_hdr.me_addr = mei_cl_me_id(cl);
	mei_hdr.reserved = 0;
	mei_hdr.msg_complete = 0;
	mei_hdr.internal = cb->internal;

	rets = mei_cl_tx_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	/* no credit or no host buffer: queue the cb for the irq thread
	 * and report the full size to the caller
	 */
	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = size;
		goto out;
	}
	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = size;
		goto out;
	}

	/* Check for a maximum length */
	if (size > mei_hbuf_max_len(dev)) {
		mei_hdr.length = mei_hbuf_max_len(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = size;
		mei_hdr.msg_complete = 1;
	}

	rets = mei_write_message(dev, &mei_hdr, buf->data);
	if (rets)
		goto err;

	rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;
	cb->completed = mei_hdr.msg_complete == 1;

out:
	/* complete messages await the firmware ack; partial ones go
	 * back to the write list for further chunking
	 */
	if (mei_hdr.msg_complete)
		list_add_tail(&cb->list, &dev->write_waiting_list);
	else
		list_add_tail(&cb->list, &dev->write_list);

	/* ownership of cb moved to the lists above: don't free it */
	cb = NULL;
	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		/* drop the device lock while waiting for completion */
		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE ||
				(!mei_cl_is_connected(cl)));
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
		/* woken by disconnect rather than write completion */
		if (cl->writing_state != MEI_WRITE_COMPLETE) {
			rets = -EFAULT;
			goto err;
		}
	}

	rets = size;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
free:
	mei_io_cb_free(cb);

	return rets;
}
1735
1736
1737/**
1738 * mei_cl_complete - processes completed operation for a client
1739 *
1740 * @cl: private data of the file object.
1741 * @cb: callback block.
1742 */
1743void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
1744{
1745        struct mei_device *dev = cl->dev;
1746
1747        switch (cb->fop_type) {
1748        case MEI_FOP_WRITE:
1749                mei_io_cb_free(cb);
1750                cl->writing_state = MEI_WRITE_COMPLETE;
1751                if (waitqueue_active(&cl->tx_wait)) {
1752                        wake_up_interruptible(&cl->tx_wait);
1753                } else {
1754                        pm_runtime_mark_last_busy(dev->dev);
1755                        pm_request_autosuspend(dev->dev);
1756                }
1757                break;
1758
1759        case MEI_FOP_READ:
1760                list_add_tail(&cb->list, &cl->rd_completed);
1761                if (!mei_cl_is_fixed_address(cl) &&
1762                    !WARN_ON(!cl->rx_flow_ctrl_creds))
1763                        cl->rx_flow_ctrl_creds--;
1764                if (!mei_cl_bus_rx_event(cl))
1765                        wake_up_interruptible(&cl->rx_wait);
1766                break;
1767
1768        case MEI_FOP_CONNECT:
1769        case MEI_FOP_DISCONNECT:
1770        case MEI_FOP_NOTIFY_STOP:
1771        case MEI_FOP_NOTIFY_START:
1772                if (waitqueue_active(&cl->wait))
1773                        wake_up(&cl->wait);
1774
1775                break;
1776        case MEI_FOP_DISCONNECT_RSP:
1777                mei_io_cb_free(cb);
1778                mei_cl_set_disconnected(cl);
1779                break;
1780        default:
1781                BUG_ON(0);
1782        }
1783}
1784
1785
1786/**
1787 * mei_cl_all_disconnect - disconnect forcefully all connected clients
1788 *
1789 * @dev: mei device
1790 */
1791void mei_cl_all_disconnect(struct mei_device *dev)
1792{
1793        struct mei_cl *cl;
1794
1795        list_for_each_entry(cl, &dev->file_list, link)
1796                mei_cl_set_disconnected(cl);
1797}
1798