linux/drivers/media/cec/cec-api.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * cec-api.c - HDMI Consumer Electronics Control framework - API
 *
 * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/version.h>

#include <media/cec-pin.h>
#include "cec-priv.h"
#include "cec-pin-priv.h"

static inline struct cec_devnode *cec_devnode_data(struct file *filp)
{
        struct cec_fh *fh = filp->private_data;

        return &fh->adap->devnode;
}

/* CEC file operations */

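/*
 * Poll support: the filehandle is writable when a message can be queued for
 * transmission, readable when received messages are queued, and EPOLLPRI is
 * signaled when events are pending.
 */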
static __poll_t cec_poll(struct file *filp,
                             struct poll_table_struct *poll)
{
        struct cec_fh *fh = filp->private_data;
        struct cec_adapter *adap = fh->adap;
        __poll_t res = 0;

        poll_wait(filp, &fh->wait, poll);
        if (!cec_is_registered(adap))
                return EPOLLERR | EPOLLHUP;
        mutex_lock(&adap->lock);
        if (adap->is_configured &&
            adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
                res |= EPOLLOUT | EPOLLWRNORM;
        if (fh->queued_msgs)
                res |= EPOLLIN | EPOLLRDNORM;
        if (fh->total_queued_events)
                res |= EPOLLPRI;
        mutex_unlock(&adap->lock);
        return res;
}

static bool cec_is_busy(const struct cec_adapter *adap,
                        const struct cec_fh *fh)
{
        bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh;
        bool valid_follower = adap->cec_follower && adap->cec_follower == fh;

        /*
         * Exclusive initiators and followers can always access the CEC adapter
         */
        if (valid_initiator || valid_follower)
                return false;
        /*
         * All others can only access the CEC adapter if there is no
         * exclusive initiator and they are in INITIATOR mode.
         */
        return adap->cec_initiator ||
               fh->mode_initiator == CEC_MODE_NO_INITIATOR;
}

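/*
 * Called by CEC_ADAP_G_CAPS: report the driver name, adapter name and
 * capabilities to userspace.
 */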
static long cec_adap_g_caps(struct cec_adapter *adap,
                            struct cec_caps __user *parg)
{
        struct cec_caps caps = {};

        strscpy(caps.driver, adap->devnode.dev.parent->driver->name,
                sizeof(caps.driver));
        strscpy(caps.name, adap->name, sizeof(caps.name));
        caps.available_log_addrs = adap->available_log_addrs;
        caps.capabilities = adap->capabilities;
        caps.version = LINUX_VERSION_CODE;
        if (copy_to_user(parg, &caps, sizeof(caps)))
                return -EFAULT;
        return 0;
}

static long cec_adap_g_phys_addr(struct cec_adapter *adap,
                                 __u16 __user *parg)
{
        u16 phys_addr;

        mutex_lock(&adap->lock);
        phys_addr = adap->phys_addr;
        mutex_unlock(&adap->lock);
        if (copy_to_user(parg, &phys_addr, sizeof(phys_addr)))
                return -EFAULT;
        return 0;
}

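/*
 * A physical address a.b.c.d is only valid if no zero nibble is followed by
 * a non-zero nibble: 1.2.0.0 is valid, 1.0.1.0 is not.
 */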
static int cec_validate_phys_addr(u16 phys_addr)
{
        int i;

        if (phys_addr == CEC_PHYS_ADDR_INVALID)
                return 0;
        for (i = 0; i < 16; i += 4)
                if (phys_addr & (0xf << i))
                        break;
        if (i == 16)
                return 0;
        for (i += 4; i < 16; i += 4)
                if ((phys_addr & (0xf << i)) == 0)
                        return -EINVAL;
        return 0;
}

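/* Called by CEC_ADAP_S_PHYS_ADDR: validate and set a new physical address */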
static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
                                 bool block, __u16 __user *parg)
{
        u16 phys_addr;
        long err;

        if (!(adap->capabilities & CEC_CAP_PHYS_ADDR))
                return -ENOTTY;
        if (copy_from_user(&phys_addr, parg, sizeof(phys_addr)))
                return -EFAULT;

        err = cec_validate_phys_addr(phys_addr);
        if (err)
                return err;
        mutex_lock(&adap->lock);
        if (cec_is_busy(adap, fh))
                err = -EBUSY;
        else
                __cec_s_phys_addr(adap, phys_addr, block);
        mutex_unlock(&adap->lock);
        return err;
}

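/* Called by CEC_ADAP_G_LOG_ADDRS: return the current logical address setup */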
static long cec_adap_g_log_addrs(struct cec_adapter *adap,
                                 struct cec_log_addrs __user *parg)
{
        struct cec_log_addrs log_addrs;

        mutex_lock(&adap->lock);
        log_addrs = adap->log_addrs;
        if (!adap->is_configured)
                memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
                       sizeof(log_addrs.log_addr));
        mutex_unlock(&adap->lock);

        if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
                return -EFAULT;
        return 0;
}

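/*
 * Called by CEC_ADAP_S_LOG_ADDRS: claim new logical addresses, or clear them
 * if num_log_addrs is 0.
 */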
static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
                                 bool block, struct cec_log_addrs __user *parg)
{
        struct cec_log_addrs log_addrs;
        long err = -EBUSY;

        if (!(adap->capabilities & CEC_CAP_LOG_ADDRS))
                return -ENOTTY;
        if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
                return -EFAULT;
        log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK |
                           CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
                           CEC_LOG_ADDRS_FL_CDC_ONLY;
        mutex_lock(&adap->lock);
        if (!adap->is_configuring &&
            (!log_addrs.num_log_addrs || !adap->is_configured) &&
            !cec_is_busy(adap, fh)) {
                err = __cec_s_log_addrs(adap, &log_addrs, block);
                if (!err)
                        log_addrs = adap->log_addrs;
        }
        mutex_unlock(&adap->lock);
        if (err)
                return err;
        if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
                return -EFAULT;
        return 0;
}

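/* Called by CEC_TRANSMIT: transmit a message on behalf of this filehandle */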
static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
                         bool block, struct cec_msg __user *parg)
{
        struct cec_msg msg = {};
        long err = 0;

        if (!(adap->capabilities & CEC_CAP_TRANSMIT))
                return -ENOTTY;
        if (copy_from_user(&msg, parg, sizeof(msg)))
                return -EFAULT;

        /* A CDC-Only device can only send CDC messages */
        if ((adap->log_addrs.flags & CEC_LOG_ADDRS_FL_CDC_ONLY) &&
            (msg.len == 1 || msg.msg[1] != CEC_MSG_CDC_MESSAGE))
                return -EINVAL;

        mutex_lock(&adap->lock);
        if (adap->log_addrs.num_log_addrs == 0)
                err = -EPERM;
        else if (adap->is_configuring)
                err = -ENONET;
        else if (!adap->is_configured &&
                 (adap->needs_hpd || msg.msg[0] != 0xf0))
                err = -ENONET;
        else if (cec_is_busy(adap, fh))
                err = -EBUSY;
        else
                err = cec_transmit_msg_fh(adap, &msg, fh, block);
        mutex_unlock(&adap->lock);
        if (err)
                return err;
        if (copy_to_user(parg, &msg, sizeof(msg)))
                return -EFAULT;
        return 0;
}

/* Called by CEC_RECEIVE: wait for a message to arrive */
static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block)
{
        u32 timeout = msg->timeout;
        int res;

        do {
                mutex_lock(&fh->lock);
                /* Are there received messages queued up? */
                if (fh->queued_msgs) {
                        /* Yes, return the first one */
                        struct cec_msg_entry *entry =
                                list_first_entry(&fh->msgs,
                                                 struct cec_msg_entry, list);

                        list_del(&entry->list);
                        *msg = entry->msg;
                        kfree(entry);
                        fh->queued_msgs--;
                        mutex_unlock(&fh->lock);
                        /* restore original timeout value */
                        msg->timeout = timeout;
                        return 0;
                }

                /* No, return EAGAIN in non-blocking mode or wait */
                mutex_unlock(&fh->lock);

                /* Return when in non-blocking mode */
                if (!block)
                        return -EAGAIN;

                if (msg->timeout) {
                        /* The user specified a timeout */
                        res = wait_event_interruptible_timeout(fh->wait,
                                                               fh->queued_msgs,
                                msecs_to_jiffies(msg->timeout));
                        if (res == 0)
                                res = -ETIMEDOUT;
                        else if (res > 0)
                                res = 0;
                } else {
                        /* Wait indefinitely */
                        res = wait_event_interruptible(fh->wait,
                                                       fh->queued_msgs);
                }
                /* Exit on error, otherwise loop to get the new message */
        } while (!res);
        return res;
}

static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
                        bool block, struct cec_msg __user *parg)
{
        struct cec_msg msg = {};
        long err;

        if (copy_from_user(&msg, parg, sizeof(msg)))
                return -EFAULT;

        err = cec_receive_msg(fh, &msg, block);
        if (err)
                return err;
        msg.flags = 0;
        if (copy_to_user(parg, &msg, sizeof(msg)))
                return -EFAULT;
        return 0;
}

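/* Called by CEC_DQEVENT: dequeue the oldest pending event */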
static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
                        bool block, struct cec_event __user *parg)
{
        struct cec_event_entry *ev = NULL;
        u64 ts = ~0ULL;
        unsigned int i;
        unsigned int ev_idx;
        long err = 0;

        mutex_lock(&fh->lock);
        while (!fh->total_queued_events && block) {
                mutex_unlock(&fh->lock);
                err = wait_event_interruptible(fh->wait,
                                               fh->total_queued_events);
                if (err)
                        return err;
                mutex_lock(&fh->lock);
        }

        /* Find the oldest event */
        for (i = 0; i < CEC_NUM_EVENTS; i++) {
                struct cec_event_entry *entry =
                        list_first_entry_or_null(&fh->events[i],
                                                 struct cec_event_entry, list);

                if (entry && entry->ev.ts <= ts) {
                        ev = entry;
                        ev_idx = i;
                        ts = ev->ev.ts;
                }
        }

        if (!ev) {
                err = -EAGAIN;
                goto unlock;
        }
        list_del(&ev->list);

        if (copy_to_user(parg, &ev->ev, sizeof(ev->ev)))
                err = -EFAULT;
        if (ev_idx >= CEC_NUM_CORE_EVENTS)
                kfree(ev);
        fh->queued_events[ev_idx]--;
        fh->total_queued_events--;

unlock:
        mutex_unlock(&fh->lock);
        return err;
}

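/* Called by CEC_G_MODE: return the current initiator and follower mode */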
static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh,
                       u32 __user *parg)
{
        u32 mode = fh->mode_initiator | fh->mode_follower;

        if (copy_to_user(parg, &mode, sizeof(mode)))
                return -EFAULT;
        return 0;
}

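/*
 * Called by CEC_S_MODE: validate the requested initiator/follower mode and,
 * if allowed, switch the filehandle to it.
 */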
static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
                       u32 __user *parg)
{
        u32 mode;
        u8 mode_initiator;
        u8 mode_follower;
        bool send_pin_event = false;
        long err = 0;

        if (copy_from_user(&mode, parg, sizeof(mode)))
                return -EFAULT;
        if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK)) {
                dprintk(1, "%s: invalid mode bits set\n", __func__);
                return -EINVAL;
        }

        mode_initiator = mode & CEC_MODE_INITIATOR_MSK;
        mode_follower = mode & CEC_MODE_FOLLOWER_MSK;

        if (mode_initiator > CEC_MODE_EXCL_INITIATOR ||
            mode_follower > CEC_MODE_MONITOR_ALL) {
                dprintk(1, "%s: unknown mode\n", __func__);
                return -EINVAL;
        }

        if (mode_follower == CEC_MODE_MONITOR_ALL &&
            !(adap->capabilities & CEC_CAP_MONITOR_ALL)) {
                dprintk(1, "%s: MONITOR_ALL not supported\n", __func__);
                return -EINVAL;
        }

        if (mode_follower == CEC_MODE_MONITOR_PIN &&
            !(adap->capabilities & CEC_CAP_MONITOR_PIN)) {
                dprintk(1, "%s: MONITOR_PIN not supported\n", __func__);
                return -EINVAL;
        }

        /* Follower modes should always be able to send CEC messages */
        if ((mode_initiator == CEC_MODE_NO_INITIATOR ||
             !(adap->capabilities & CEC_CAP_TRANSMIT)) &&
            mode_follower >= CEC_MODE_FOLLOWER &&
            mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
                dprintk(1, "%s: cannot transmit\n", __func__);
                return -EINVAL;
        }

        /* Monitor modes require CEC_MODE_NO_INITIATOR */
        if (mode_initiator && mode_follower >= CEC_MODE_MONITOR_PIN) {
                dprintk(1, "%s: monitor modes require NO_INITIATOR\n",
                        __func__);
                return -EINVAL;
        }

        /* Monitor modes require CAP_NET_ADMIN */
        if (mode_follower >= CEC_MODE_MONITOR_PIN && !capable(CAP_NET_ADMIN))
                return -EPERM;

        mutex_lock(&adap->lock);
        /*
         * You can't become exclusive follower if someone else already
         * has that job.
         */
        if ((mode_follower == CEC_MODE_EXCL_FOLLOWER ||
             mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) &&
            adap->cec_follower && adap->cec_follower != fh)
                err = -EBUSY;
        /*
         * You can't become exclusive initiator if someone else already
         * has that job.
         */
        if (mode_initiator == CEC_MODE_EXCL_INITIATOR &&
            adap->cec_initiator && adap->cec_initiator != fh)
                err = -EBUSY;

        if (!err) {
                bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL;
                bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL;

                if (old_mon_all != new_mon_all) {
                        if (new_mon_all)
                                err = cec_monitor_all_cnt_inc(adap);
                        else
                                cec_monitor_all_cnt_dec(adap);
                }
        }

        if (!err) {
                bool old_mon_pin = fh->mode_follower == CEC_MODE_MONITOR_PIN;
                bool new_mon_pin = mode_follower == CEC_MODE_MONITOR_PIN;

                if (old_mon_pin != new_mon_pin) {
                        send_pin_event = new_mon_pin;
                        if (new_mon_pin)
                                err = cec_monitor_pin_cnt_inc(adap);
                        else
                                cec_monitor_pin_cnt_dec(adap);
                }
        }

        if (err) {
                mutex_unlock(&adap->lock);
                return err;
        }

        if (fh->mode_follower == CEC_MODE_FOLLOWER)
                adap->follower_cnt--;
        if (mode_follower == CEC_MODE_FOLLOWER)
                adap->follower_cnt++;
        if (send_pin_event) {
                struct cec_event ev = {
                        .flags = CEC_EVENT_FL_INITIAL_STATE,
                };

                ev.event = adap->cec_pin_is_high ? CEC_EVENT_PIN_CEC_HIGH :
                                                   CEC_EVENT_PIN_CEC_LOW;
                cec_queue_event_fh(fh, &ev, 0);
        }
        if (mode_follower == CEC_MODE_EXCL_FOLLOWER ||
            mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
                adap->passthrough =
                        mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
                adap->cec_follower = fh;
        } else if (adap->cec_follower == fh) {
                adap->passthrough = false;
                adap->cec_follower = NULL;
        }
        if (mode_initiator == CEC_MODE_EXCL_INITIATOR)
                adap->cec_initiator = fh;
        else if (adap->cec_initiator == fh)
                adap->cec_initiator = NULL;
        fh->mode_initiator = mode_initiator;
        fh->mode_follower = mode_follower;
        mutex_unlock(&adap->lock);
        return 0;
}

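/*
 * Dispatch the CEC ioctls to their handlers.
 *
 * A minimal userspace sketch (illustrative only, error handling omitted):
 *
 *      int fd = open("/dev/cec0", O_RDWR);
 *      struct cec_caps caps;
 *
 *      ioctl(fd, CEC_ADAP_G_CAPS, &caps);
 */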
static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct cec_fh *fh = filp->private_data;
        struct cec_adapter *adap = fh->adap;
        bool block = !(filp->f_flags & O_NONBLOCK);
        void __user *parg = (void __user *)arg;

        if (!cec_is_registered(adap))
                return -ENODEV;

        switch (cmd) {
        case CEC_ADAP_G_CAPS:
                return cec_adap_g_caps(adap, parg);

        case CEC_ADAP_G_PHYS_ADDR:
                return cec_adap_g_phys_addr(adap, parg);

        case CEC_ADAP_S_PHYS_ADDR:
                return cec_adap_s_phys_addr(adap, fh, block, parg);

        case CEC_ADAP_G_LOG_ADDRS:
                return cec_adap_g_log_addrs(adap, parg);

        case CEC_ADAP_S_LOG_ADDRS:
                return cec_adap_s_log_addrs(adap, fh, block, parg);

        case CEC_TRANSMIT:
                return cec_transmit(adap, fh, block, parg);

        case CEC_RECEIVE:
                return cec_receive(adap, fh, block, parg);

        case CEC_DQEVENT:
                return cec_dqevent(adap, fh, block, parg);

        case CEC_G_MODE:
                return cec_g_mode(adap, fh, parg);

        case CEC_S_MODE:
                return cec_s_mode(adap, fh, parg);

        default:
                return -ENOTTY;
        }
}

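/* Called when the /dev/cecX device node is opened: set up a new filehandle */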
static int cec_open(struct inode *inode, struct file *filp)
{
        struct cec_devnode *devnode =
                container_of(inode->i_cdev, struct cec_devnode, cdev);
        struct cec_adapter *adap = to_cec_adapter(devnode);
        struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
        /*
         * Initial events that are automatically sent when the cec device is
         * opened.
         */
        struct cec_event ev = {
                .event = CEC_EVENT_STATE_CHANGE,
                .flags = CEC_EVENT_FL_INITIAL_STATE,
        };
        unsigned int i;
        int err;

        if (!fh)
                return -ENOMEM;

        INIT_LIST_HEAD(&fh->msgs);
        INIT_LIST_HEAD(&fh->xfer_list);
        for (i = 0; i < CEC_NUM_EVENTS; i++)
                INIT_LIST_HEAD(&fh->events[i]);
        mutex_init(&fh->lock);
        init_waitqueue_head(&fh->wait);

        fh->mode_initiator = CEC_MODE_INITIATOR;
        fh->adap = adap;

        err = cec_get_device(devnode);
        if (err) {
                kfree(fh);
                return err;
        }

        mutex_lock(&devnode->lock);
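        /*
         * Enable the adapter on the first open if it does not depend on
         * the HPD and no physical address has been set yet.
         */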
        if (list_empty(&devnode->fhs) &&
            !adap->needs_hpd &&
            adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
                err = adap->ops->adap_enable(adap, true);
                if (err) {
                        mutex_unlock(&devnode->lock);
                        kfree(fh);
                        return err;
                }
        }
        filp->private_data = fh;

        /* Queue up initial state events */
        ev.state_change.phys_addr = adap->phys_addr;
        ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
        cec_queue_event_fh(fh, &ev, 0);
#ifdef CONFIG_CEC_PIN
        if (adap->pin && adap->pin->ops->read_hpd) {
                err = adap->pin->ops->read_hpd(adap);
                if (err >= 0) {
                        ev.event = err ? CEC_EVENT_PIN_HPD_HIGH :
                                         CEC_EVENT_PIN_HPD_LOW;
                        cec_queue_event_fh(fh, &ev, 0);
                }
        }
        if (adap->pin && adap->pin->ops->read_5v) {
                err = adap->pin->ops->read_5v(adap);
                if (err >= 0) {
                        ev.event = err ? CEC_EVENT_PIN_5V_HIGH :
                                         CEC_EVENT_PIN_5V_LOW;
                        cec_queue_event_fh(fh, &ev, 0);
                }
        }
#endif

        list_add(&fh->list, &devnode->fhs);
        mutex_unlock(&devnode->lock);

        return 0;
}

/* Override for the release function */
static int cec_release(struct inode *inode, struct file *filp)
{
        struct cec_devnode *devnode = cec_devnode_data(filp);
        struct cec_adapter *adap = to_cec_adapter(devnode);
        struct cec_fh *fh = filp->private_data;
        unsigned int i;

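        /*
         * Drop any exclusive initiator/follower role and any follower or
         * monitor counts held by this filehandle.
         */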
        mutex_lock(&adap->lock);
        if (adap->cec_initiator == fh)
                adap->cec_initiator = NULL;
        if (adap->cec_follower == fh) {
                adap->cec_follower = NULL;
                adap->passthrough = false;
        }
        if (fh->mode_follower == CEC_MODE_FOLLOWER)
                adap->follower_cnt--;
        if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
                cec_monitor_pin_cnt_dec(adap);
        if (fh->mode_follower == CEC_MODE_MONITOR_ALL)
                cec_monitor_all_cnt_dec(adap);
        mutex_unlock(&adap->lock);

        mutex_lock(&devnode->lock);
        list_del(&fh->list);
        if (cec_is_registered(adap) && list_empty(&devnode->fhs) &&
            !adap->needs_hpd && adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
                WARN_ON(adap->ops->adap_enable(adap, false));
        }
        mutex_unlock(&devnode->lock);

        /* Unhook pending transmits from this filehandle. */
        mutex_lock(&adap->lock);
        while (!list_empty(&fh->xfer_list)) {
                struct cec_data *data =
                        list_first_entry(&fh->xfer_list, struct cec_data, xfer_list);

                data->blocking = false;
                data->fh = NULL;
                list_del(&data->xfer_list);
        }
        mutex_unlock(&adap->lock);
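        /* Free queued messages and dynamically allocated (non-core) events */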
        while (!list_empty(&fh->msgs)) {
                struct cec_msg_entry *entry =
                        list_first_entry(&fh->msgs, struct cec_msg_entry, list);

                list_del(&entry->list);
                kfree(entry);
        }
        for (i = CEC_NUM_CORE_EVENTS; i < CEC_NUM_EVENTS; i++) {
                while (!list_empty(&fh->events[i])) {
                        struct cec_event_entry *entry =
                                list_first_entry(&fh->events[i],
                                                 struct cec_event_entry, list);

                        list_del(&entry->list);
                        kfree(entry);
                }
        }
        kfree(fh);

        cec_put_device(devnode);
        filp->private_data = NULL;
        return 0;
}

const struct file_operations cec_devnode_fops = {
        .owner = THIS_MODULE,
        .open = cec_open,
        .unlocked_ioctl = cec_ioctl,
        .compat_ioctl = cec_ioctl,
        .release = cec_release,
        .poll = cec_poll,
        .llseek = no_llseek,
};