linux/drivers/misc/mei/interrupt.c
/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */


#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_irq_compl_handler - dispatch completion handlers
 *      for the completed callbacks
 *
 * @dev: mei device
 * @compl_list: list of completed callbacks
 */
void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
{
        struct mei_cl_cb *cb, *next;
        struct mei_cl *cl;

        list_for_each_entry_safe(cb, next, &compl_list->list, list) {
                cl = cb->cl;
                list_del_init(&cb->list);

                dev_dbg(dev->dev, "completing callback.\n");
                if (cl == &dev->iamthif_cl)
                        mei_amthif_complete(cl, cb);
                else
                        mei_cl_complete(cl, cb);
        }
}
EXPORT_SYMBOL_GPL(mei_irq_compl_handler);

/**
 * mei_cl_hbm_equal - check if hbm is addressed to the client
 *
 * @cl: host client
 * @mei_hdr: header of mei client message
 *
 * Return: true if matches, false otherwise
 */
static inline int mei_cl_hbm_equal(struct mei_cl *cl,
                        struct mei_msg_hdr *mei_hdr)
{
        return  mei_cl_host_addr(cl) == mei_hdr->host_addr &&
                mei_cl_me_id(cl) == mei_hdr->me_addr;
}

/**
 * mei_irq_discard_msg - discard received message
 *
 * @dev: mei device
 * @hdr: message header
 */
static inline
void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
{
        /*
         * no need to check for size as it is guaranteed
         * that length fits into rd_msg_buf
         */
        mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
        dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n",
                MEI_HDR_PRM(hdr));
}

/**
 * mei_cl_irq_read_msg - process client message
 *
 * @cl: reading client
 * @mei_hdr: header of mei client message
 * @complete_list: completion list
 *
 * Return: always 0
 */
int mei_cl_irq_read_msg(struct mei_cl *cl,
                       struct mei_msg_hdr *mei_hdr,
                       struct mei_cl_cb *complete_list)
{
        struct mei_device *dev = cl->dev;
        struct mei_cl_cb *cb;
        unsigned char *buffer = NULL;
        size_t buf_sz;

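        /* take the oldest read callback queued by the client, if any */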
        cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
        if (!cb) {
                cl_err(dev, cl, "pending read cb not found\n");
                goto out;
        }

        if (!mei_cl_is_connected(cl)) {
                cl_dbg(dev, cl, "not connected\n");
                cb->status = -ENODEV;
                goto out;
        }

        if (cb->buf.size == 0 || cb->buf.data == NULL) {
                cl_err(dev, cl, "response buffer is not allocated.\n");
                list_move_tail(&cb->list, &complete_list->list);
                cb->status = -ENOMEM;
                goto out;
        }

        buf_sz = mei_hdr->length + cb->buf_idx;
        /* catch for integer overflow */
        if (buf_sz < cb->buf_idx) {
                cl_err(dev, cl, "message is too big len %d idx %zu\n",
                       mei_hdr->length, cb->buf_idx);

                list_move_tail(&cb->list, &complete_list->list);
                cb->status = -EMSGSIZE;
                goto out;
        }

        if (cb->buf.size < buf_sz) {
                cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n",
                        cb->buf.size, mei_hdr->length, cb->buf_idx);
                buffer = krealloc(cb->buf.data, buf_sz, GFP_KERNEL);

                if (!buffer) {
                        cb->status = -ENOMEM;
                        list_move_tail(&cb->list, &complete_list->list);
                        goto out;
                }
                cb->buf.data = buffer;
                cb->buf.size = buf_sz;
        }

        buffer = cb->buf.data + cb->buf_idx;
        mei_read_slots(dev, buffer, mei_hdr->length);

        cb->buf_idx += mei_hdr->length;

        if (mei_hdr->msg_complete) {
                cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
                list_move_tail(&cb->list, &complete_list->list);
        } else {
                pm_runtime_mark_last_busy(dev->dev);
                pm_request_autosuspend(dev->dev);
        }

out:
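        /*
         * buffer is still NULL when the payload was not copied into a client
         * buffer; drain the message from the hardware so its slots are freed
         */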
        if (!buffer)
                mei_irq_discard_msg(dev, mei_hdr);

        return 0;
}

/**
 * mei_cl_irq_disconnect_rsp - send disconnection response message
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
                                     struct mei_cl_cb *cmpl_list)
{
        struct mei_device *dev = cl->dev;
        u32 msg_slots;
        int slots;
        int ret;

        slots = mei_hbuf_empty_slots(dev);
        msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_response));

        if (slots < msg_slots)
                return -EMSGSIZE;

        ret = mei_hbm_cl_disconnect_rsp(dev, cl);
        mei_cl_set_disconnected(cl);
        mei_io_cb_free(cb);
        mei_me_cl_put(cl->me_cl);
        cl->me_cl = NULL;

        return ret;
}

/**
 * mei_cl_irq_read - processes a client read request from the interrupt
 *      thread context by requesting flow control credits
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
                           struct mei_cl_cb *cmpl_list)
{
        struct mei_device *dev = cl->dev;
        u32 msg_slots;
        int slots;
        int ret;

        msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
        slots = mei_hbuf_empty_slots(dev);

        if (slots < msg_slots)
                return -EMSGSIZE;

        ret = mei_hbm_cl_flow_control_req(dev, cl);
        if (ret) {
                cl->status = ret;
                cb->buf_idx = 0;
                list_move_tail(&cb->list, &cmpl_list->list);
                return ret;
        }

        list_move_tail(&cb->list, &cl->rd_pending);

        return 0;
}

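/**
 * hdr_is_hbm - check if the message header is addressed to the HBM
 *      (host bus message, both addresses are zero)
 *
 * @mei_hdr: mei message header
 *
 * Return: true if the header belongs to a bus message
 */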
static inline bool hdr_is_hbm(struct mei_msg_hdr *mei_hdr)
{
        return mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0;
}

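/**
 * hdr_is_fixed - check if the message header is addressed to a fixed
 *      address client (host address is zero, me address is set)
 *
 * @mei_hdr: mei message header
 *
 * Return: true if the header belongs to a fixed address client message
 */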
static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
{
        return mei_hdr->host_addr == 0 && mei_hdr->me_addr != 0;
}

/**
 * mei_irq_read_handler - bottom half read routine after ISR to
 *      handle the read processing.
 *
 * @dev: the device structure
 * @cmpl_list: list of completed callbacks
 * @slots: slots to read.
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_read_handler(struct mei_device *dev,
                struct mei_cl_cb *cmpl_list, s32 *slots)
{
        struct mei_msg_hdr *mei_hdr;
        struct mei_cl *cl;
        int ret;

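        /*
         * the header is read only once per message: it stays cached in
         * dev->rd_msg_hdr until enough slots are available for the payload
         */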
        if (!dev->rd_msg_hdr) {
                dev->rd_msg_hdr = mei_read_hdr(dev);
                (*slots)--;
                dev_dbg(dev->dev, "slots =%08x.\n", *slots);
        }
        mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
        dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

        if (mei_hdr->reserved || !dev->rd_msg_hdr) {
                dev_err(dev->dev, "corrupted message header 0x%08X\n",
                                dev->rd_msg_hdr);
                ret = -EBADMSG;
                goto end;
        }

        if (mei_slots2data(*slots) < mei_hdr->length) {
                dev_err(dev->dev, "less data available than length=%08x.\n",
                                *slots);
                /* we can't read the message */
                ret = -ENODATA;
                goto end;
        }

        /* HBM message */
        if (hdr_is_hbm(mei_hdr)) {
                ret = mei_hbm_dispatch(dev, mei_hdr);
                if (ret) {
                        dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
                                        ret);
                        goto end;
                }
                goto reset_slots;
        }

        /* find recipient cl */
        list_for_each_entry(cl, &dev->file_list, link) {
                if (mei_cl_hbm_equal(cl, mei_hdr)) {
                        cl_dbg(dev, cl, "got a message\n");
                        break;
                }
        }

        /* if no recipient cl was found we assume corrupted header */
        if (&cl->link == &dev->file_list) {
                /* A message for a fixed address client that is not
                 * connected should be silently discarded
                 */
                if (hdr_is_fixed(mei_hdr)) {
                        mei_irq_discard_msg(dev, mei_hdr);
                        ret = 0;
                        goto reset_slots;
                }
                dev_err(dev->dev, "no destination client found 0x%08X\n",
                                dev->rd_msg_hdr);
                ret = -EBADMSG;
                goto end;
        }

        if (cl == &dev->iamthif_cl)
                ret = mei_amthif_irq_read_msg(cl, mei_hdr, cmpl_list);
        else
                ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);

reset_slots:
        /* reset the number of slots and header */
        *slots = mei_count_full_read_slots(dev);
        dev->rd_msg_hdr = 0;

        if (*slots == -EOVERFLOW) {
                /* overflow - reset */
                dev_err(dev->dev, "resetting due to slots overflow.\n");
                /* set the event since message has been read */
                ret = -ERANGE;
                goto end;
        }
end:
        return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);


/**
 * mei_irq_write_handler - dispatch write requests
 *      after an irq is received
 *
 * @dev: the device structure
 * @cmpl_list: list of completed callbacks
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
{
        struct mei_cl *cl;
        struct mei_cl_cb *cb, *next;
        struct mei_cl_cb *list;
        s32 slots;
        int ret;

        if (!mei_hbuf_acquire(dev))
                return 0;

        slots = mei_hbuf_empty_slots(dev);
        if (slots <= 0)
                return -EMSGSIZE;

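        /*
         * the handler runs in three passes: complete the callbacks on the
         * write-waiting list, send pending control messages from the ctrl
         * write list, and finally send queued data from the write list
         */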
        /* complete all waiting for write CB */
        dev_dbg(dev->dev, "complete all waiting for write cb.\n");

        list = &dev->write_waiting_list;
        list_for_each_entry_safe(cb, next, &list->list, list) {
                cl = cb->cl;

                cl->status = 0;
                cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
                cl->writing_state = MEI_WRITE_COMPLETE;
                list_move_tail(&cb->list, &cmpl_list->list);
        }

        /* complete control write list CB */
        dev_dbg(dev->dev, "complete control write list cb.\n");
        list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) {
                cl = cb->cl;
                switch (cb->fop_type) {
                case MEI_FOP_DISCONNECT:
                        /* send disconnect message */
                        ret = mei_cl_irq_disconnect(cl, cb, cmpl_list);
                        if (ret)
                                return ret;

                        break;
                case MEI_FOP_READ:
                        /* send flow control message */
                        ret = mei_cl_irq_read(cl, cb, cmpl_list);
                        if (ret)
                                return ret;

                        break;
                case MEI_FOP_CONNECT:
                        /* connect message */
                        ret = mei_cl_irq_connect(cl, cb, cmpl_list);
                        if (ret)
                                return ret;

                        break;
                case MEI_FOP_DISCONNECT_RSP:
                        /* send disconnect resp */
                        ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
                        if (ret)
                                return ret;
                        break;

                case MEI_FOP_NOTIFY_START:
                case MEI_FOP_NOTIFY_STOP:
                        ret = mei_cl_irq_notify(cl, cb, cmpl_list);
                        if (ret)
                                return ret;
                        break;
                default:
                        BUG();
                }
        }

        /* complete write list CB */
        dev_dbg(dev->dev, "complete write list cb.\n");
        list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
                cl = cb->cl;
                if (cl == &dev->iamthif_cl)
                        ret = mei_amthif_irq_write(cl, cb, cmpl_list);
                else
                        ret = mei_cl_irq_write(cl, cb, cmpl_list);
                if (ret)
                        return ret;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);


/**
 * mei_connect_timeout - connect/disconnect timeout handler
 *
 * @cl: host client
 */
static void mei_connect_timeout(struct mei_cl *cl)
{
        struct mei_device *dev = cl->dev;

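        /*
         * if the firmware supports disconnect on connect timeout, mark the
         * client as requiring a disconnect and wake the waiter instead of
         * resetting the whole device
         */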
        if (cl->state == MEI_FILE_CONNECTING) {
                if (dev->hbm_f_dot_supported) {
                        cl->state = MEI_FILE_DISCONNECT_REQUIRED;
                        wake_up(&cl->wait);
                        return;
                }
        }
        mei_reset(dev);
}

/**
 * mei_timer - timer function.
 *
 * @work: pointer to the work_struct structure
 */
void mei_timer(struct work_struct *work)
{
        struct mei_cl *cl;
        struct mei_device *dev = container_of(work,
                                        struct mei_device, timer_work.work);

        mutex_lock(&dev->device_lock);

        /* Catch interrupt stalls during HBM init handshake */
        if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
            dev->hbm_state != MEI_HBM_IDLE) {

                if (dev->init_clients_timer) {
                        if (--dev->init_clients_timer == 0) {
                                dev_err(dev->dev, "timer: init clients timeout hbm_state = %d.\n",
                                        dev->hbm_state);
                                mei_reset(dev);
                                goto out;
                        }
                }
        }

        if (dev->dev_state != MEI_DEV_ENABLED)
                goto out;

        /*** connect/disconnect timeouts ***/
        list_for_each_entry(cl, &dev->file_list, link) {
                if (cl->timer_count) {
                        if (--cl->timer_count == 0) {
                                dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
                                mei_connect_timeout(cl);
                                goto out;
                        }
                }
        }

        if (!mei_cl_is_connected(&dev->iamthif_cl))
                goto out;

        if (dev->iamthif_stall_timer) {
                if (--dev->iamthif_stall_timer == 0) {
                        dev_err(dev->dev, "timer: amthif hung.\n");
                        mei_reset(dev);
                        dev->iamthif_canceled = false;
                        dev->iamthif_state = MEI_IAMTHIF_IDLE;

                        mei_io_cb_free(dev->iamthif_current_cb);
                        dev->iamthif_current_cb = NULL;

                        dev->iamthif_fp = NULL;
                        mei_amthif_run_next_cmd(dev);
                }
        }

out:
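        /* re-arm: the timer ticks every two seconds unless the device is disabled */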
        if (dev->dev_state != MEI_DEV_DISABLED)
                schedule_delayed_work(&dev->timer_work, 2 * HZ);
        mutex_unlock(&dev->device_lock);
}