linux/drivers/media/cec/cec-api.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * cec-api.c - HDMI Consumer Electronics Control framework - API
 *
 * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/version.h>

#include <media/cec-pin.h>
#include "cec-priv.h"
#include "cec-pin-priv.h"

static inline struct cec_devnode *cec_devnode_data(struct file *filp)
{
        struct cec_fh *fh = filp->private_data;

        return &fh->adap->devnode;
}

/* CEC file operations */

static __poll_t cec_poll(struct file *filp,
                         struct poll_table_struct *poll)
{
        struct cec_fh *fh = filp->private_data;
        struct cec_adapter *adap = fh->adap;
        __poll_t res = 0;

        if (!cec_is_registered(adap))
                return EPOLLERR | EPOLLHUP;
        mutex_lock(&adap->lock);
        if (adap->is_configured &&
            adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
                res |= EPOLLOUT | EPOLLWRNORM;
        if (fh->queued_msgs)
                res |= EPOLLIN | EPOLLRDNORM;
        if (fh->total_queued_events)
                res |= EPOLLPRI;
        poll_wait(filp, &fh->wait, poll);
        mutex_unlock(&adap->lock);
        return res;
}

static bool cec_is_busy(const struct cec_adapter *adap,
                        const struct cec_fh *fh)
{
        bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh;
        bool valid_follower = adap->cec_follower && adap->cec_follower == fh;

        /*
         * Exclusive initiators and followers can always access the CEC adapter
         */
        if (valid_initiator || valid_follower)
                return false;
        /*
         * All others can only access the CEC adapter if there is no
         * exclusive initiator and they are in INITIATOR mode.
         */
        return adap->cec_initiator ||
               fh->mode_initiator == CEC_MODE_NO_INITIATOR;
}

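/* Called by CEC_ADAP_G_CAPS: report the adapter's static capabilities */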
static long cec_adap_g_caps(struct cec_adapter *adap,
                            struct cec_caps __user *parg)
{
        struct cec_caps caps = {};

        strlcpy(caps.driver, adap->devnode.dev.parent->driver->name,
                sizeof(caps.driver));
        strlcpy(caps.name, adap->name, sizeof(caps.name));
        caps.available_log_addrs = adap->available_log_addrs;
        caps.capabilities = adap->capabilities;
        caps.version = LINUX_VERSION_CODE;
        if (copy_to_user(parg, &caps, sizeof(caps)))
                return -EFAULT;
        return 0;
}

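/* Called by CEC_ADAP_G_PHYS_ADDR: return the current physical address */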
static long cec_adap_g_phys_addr(struct cec_adapter *adap,
                                 __u16 __user *parg)
{
        u16 phys_addr;

        mutex_lock(&adap->lock);
        phys_addr = adap->phys_addr;
        mutex_unlock(&adap->lock);
        if (copy_to_user(parg, &phys_addr, sizeof(phys_addr)))
                return -EFAULT;
        return 0;
}

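/* Called by CEC_ADAP_S_PHYS_ADDR: validate and set a new physical address */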
static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
                                 bool block, __u16 __user *parg)
{
        u16 phys_addr;
        long err;

        if (!(adap->capabilities & CEC_CAP_PHYS_ADDR))
                return -ENOTTY;
        if (copy_from_user(&phys_addr, parg, sizeof(phys_addr)))
                return -EFAULT;

        err = cec_phys_addr_validate(phys_addr, NULL, NULL);
        if (err)
                return err;
        mutex_lock(&adap->lock);
        if (cec_is_busy(adap, fh))
                err = -EBUSY;
        else
                __cec_s_phys_addr(adap, phys_addr, block);
        mutex_unlock(&adap->lock);
        return err;
}

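/* Called by CEC_ADAP_G_LOG_ADDRS: return the current logical address setup */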
static long cec_adap_g_log_addrs(struct cec_adapter *adap,
                                 struct cec_log_addrs __user *parg)
{
        struct cec_log_addrs log_addrs;

        mutex_lock(&adap->lock);
        log_addrs = adap->log_addrs;
        if (!adap->is_configured)
                memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
                       sizeof(log_addrs.log_addr));
        mutex_unlock(&adap->lock);

        if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
                return -EFAULT;
        return 0;
}

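/* Called by CEC_ADAP_S_LOG_ADDRS: claim new logical addresses */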
static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
                                 bool block, struct cec_log_addrs __user *parg)
{
        struct cec_log_addrs log_addrs;
        long err = -EBUSY;

        if (!(adap->capabilities & CEC_CAP_LOG_ADDRS))
                return -ENOTTY;
        if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
                return -EFAULT;
        log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK |
                           CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
                           CEC_LOG_ADDRS_FL_CDC_ONLY;
        mutex_lock(&adap->lock);
        if (!adap->is_configuring &&
            (!log_addrs.num_log_addrs || !adap->is_configured) &&
            !cec_is_busy(adap, fh)) {
                err = __cec_s_log_addrs(adap, &log_addrs, block);
                if (!err)
                        log_addrs = adap->log_addrs;
        }
        mutex_unlock(&adap->lock);
        if (err)
                return err;
        if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
                return -EFAULT;
        return 0;
}

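/* Called by CEC_TRANSMIT: validate a message and queue it for transmission */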
static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
                         bool block, struct cec_msg __user *parg)
{
        struct cec_msg msg = {};
        long err = 0;

        if (!(adap->capabilities & CEC_CAP_TRANSMIT))
                return -ENOTTY;
        if (copy_from_user(&msg, parg, sizeof(msg)))
                return -EFAULT;

        /* A CDC-Only device can only send CDC messages */
        if ((adap->log_addrs.flags & CEC_LOG_ADDRS_FL_CDC_ONLY) &&
            (msg.len == 1 || msg.msg[1] != CEC_MSG_CDC_MESSAGE))
                return -EINVAL;

        mutex_lock(&adap->lock);
        if (adap->log_addrs.num_log_addrs == 0)
                err = -EPERM;
        else if (adap->is_configuring)
                err = -ENONET;
        else if (!adap->is_configured &&
                 (adap->needs_hpd || msg.msg[0] != 0xf0))
                err = -ENONET;
        else if (cec_is_busy(adap, fh))
                err = -EBUSY;
        else
                err = cec_transmit_msg_fh(adap, &msg, fh, block);
        mutex_unlock(&adap->lock);
        if (err)
                return err;
        if (copy_to_user(parg, &msg, sizeof(msg)))
                return -EFAULT;
        return 0;
}

/* Called by CEC_RECEIVE: wait for a message to arrive */
static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block)
{
        u32 timeout = msg->timeout;
        int res;

        do {
                mutex_lock(&fh->lock);
                /* Are there received messages queued up? */
                if (fh->queued_msgs) {
                        /* Yes, return the first one */
                        struct cec_msg_entry *entry =
                                list_first_entry(&fh->msgs,
                                                 struct cec_msg_entry, list);

                        list_del(&entry->list);
                        *msg = entry->msg;
                        kfree(entry);
                        fh->queued_msgs--;
                        mutex_unlock(&fh->lock);
                        /* restore original timeout value */
                        msg->timeout = timeout;
                        return 0;
                }

                /* No, return EAGAIN in non-blocking mode or wait */
                mutex_unlock(&fh->lock);

                /* Return when in non-blocking mode */
                if (!block)
                        return -EAGAIN;

                if (msg->timeout) {
                        /* The user specified a timeout */
                        res = wait_event_interruptible_timeout(fh->wait,
                                                               fh->queued_msgs,
                                msecs_to_jiffies(msg->timeout));
                        if (res == 0)
                                res = -ETIMEDOUT;
                        else if (res > 0)
                                res = 0;
                } else {
                        /* Wait indefinitely */
                        res = wait_event_interruptible(fh->wait,
                                                       fh->queued_msgs);
                }
                /* Exit on error, otherwise loop to get the new message */
        } while (!res);
        return res;
}

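/* Called by CEC_RECEIVE: copy the next received message back to userspace */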
static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
                        bool block, struct cec_msg __user *parg)
{
        struct cec_msg msg = {};
        long err;

        if (copy_from_user(&msg, parg, sizeof(msg)))
                return -EFAULT;

        err = cec_receive_msg(fh, &msg, block);
        if (err)
                return err;
        msg.flags = 0;
        if (copy_to_user(parg, &msg, sizeof(msg)))
                return -EFAULT;
        return 0;
}

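/* Called by CEC_DQEVENT: dequeue the oldest pending event */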
static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
                        bool block, struct cec_event __user *parg)
{
        struct cec_event_entry *ev = NULL;
        u64 ts = ~0ULL;
        unsigned int i;
        unsigned int ev_idx;
        long err = 0;

        mutex_lock(&fh->lock);
        while (!fh->total_queued_events && block) {
                mutex_unlock(&fh->lock);
                err = wait_event_interruptible(fh->wait,
                                               fh->total_queued_events);
                if (err)
                        return err;
                mutex_lock(&fh->lock);
        }

        /* Find the oldest event */
        for (i = 0; i < CEC_NUM_EVENTS; i++) {
                struct cec_event_entry *entry =
                        list_first_entry_or_null(&fh->events[i],
                                                 struct cec_event_entry, list);

                if (entry && entry->ev.ts <= ts) {
                        ev = entry;
                        ev_idx = i;
                        ts = ev->ev.ts;
                }
        }

        if (!ev) {
                err = -EAGAIN;
                goto unlock;
        }
        list_del(&ev->list);

        if (copy_to_user(parg, &ev->ev, sizeof(ev->ev)))
                err = -EFAULT;
        if (ev_idx >= CEC_NUM_CORE_EVENTS)
                kfree(ev);
        fh->queued_events[ev_idx]--;
        fh->total_queued_events--;

unlock:
        mutex_unlock(&fh->lock);
        return err;
}

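/* Called by CEC_G_MODE: return this filehandle's initiator/follower mode */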
static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh,
                       u32 __user *parg)
{
        u32 mode = fh->mode_initiator | fh->mode_follower;

        if (copy_to_user(parg, &mode, sizeof(mode)))
                return -EFAULT;
        return 0;
}

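/* Called by CEC_S_MODE: change this filehandle's initiator/follower mode */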
static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
                       u32 __user *parg)
{
        u32 mode;
        u8 mode_initiator;
        u8 mode_follower;
        bool send_pin_event = false;
        long err = 0;

        if (copy_from_user(&mode, parg, sizeof(mode)))
                return -EFAULT;
        if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK)) {
                dprintk(1, "%s: invalid mode bits set\n", __func__);
                return -EINVAL;
        }

        mode_initiator = mode & CEC_MODE_INITIATOR_MSK;
        mode_follower = mode & CEC_MODE_FOLLOWER_MSK;

        if (mode_initiator > CEC_MODE_EXCL_INITIATOR ||
            mode_follower > CEC_MODE_MONITOR_ALL) {
                dprintk(1, "%s: unknown mode\n", __func__);
                return -EINVAL;
        }

        if (mode_follower == CEC_MODE_MONITOR_ALL &&
            !(adap->capabilities & CEC_CAP_MONITOR_ALL)) {
                dprintk(1, "%s: MONITOR_ALL not supported\n", __func__);
                return -EINVAL;
        }

        if (mode_follower == CEC_MODE_MONITOR_PIN &&
            !(adap->capabilities & CEC_CAP_MONITOR_PIN)) {
                dprintk(1, "%s: MONITOR_PIN not supported\n", __func__);
                return -EINVAL;
        }

        /* Follower modes should always be able to send CEC messages */
        if ((mode_initiator == CEC_MODE_NO_INITIATOR ||
             !(adap->capabilities & CEC_CAP_TRANSMIT)) &&
            mode_follower >= CEC_MODE_FOLLOWER &&
            mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
                dprintk(1, "%s: cannot transmit\n", __func__);
                return -EINVAL;
        }

        /* Monitor modes require CEC_MODE_NO_INITIATOR */
        if (mode_initiator && mode_follower >= CEC_MODE_MONITOR_PIN) {
                dprintk(1, "%s: monitor modes require NO_INITIATOR\n",
                        __func__);
                return -EINVAL;
        }

        /* Monitor modes require CAP_NET_ADMIN */
        if (mode_follower >= CEC_MODE_MONITOR_PIN && !capable(CAP_NET_ADMIN))
                return -EPERM;

        mutex_lock(&adap->lock);
        /*
         * You can't become exclusive follower if someone else already
         * has that job.
         */
        if ((mode_follower == CEC_MODE_EXCL_FOLLOWER ||
             mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) &&
            adap->cec_follower && adap->cec_follower != fh)
                err = -EBUSY;
        /*
         * You can't become exclusive initiator if someone else already
         * has that job.
         */
        if (mode_initiator == CEC_MODE_EXCL_INITIATOR &&
            adap->cec_initiator && adap->cec_initiator != fh)
                err = -EBUSY;

        if (!err) {
                bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL;
                bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL;

                if (old_mon_all != new_mon_all) {
                        if (new_mon_all)
                                err = cec_monitor_all_cnt_inc(adap);
                        else
                                cec_monitor_all_cnt_dec(adap);
                }
        }

        if (!err) {
                bool old_mon_pin = fh->mode_follower == CEC_MODE_MONITOR_PIN;
                bool new_mon_pin = mode_follower == CEC_MODE_MONITOR_PIN;

                if (old_mon_pin != new_mon_pin) {
                        send_pin_event = new_mon_pin;
                        if (new_mon_pin)
                                err = cec_monitor_pin_cnt_inc(adap);
                        else
                                cec_monitor_pin_cnt_dec(adap);
                }
        }

        if (err) {
                mutex_unlock(&adap->lock);
                return err;
        }

        if (fh->mode_follower == CEC_MODE_FOLLOWER)
                adap->follower_cnt--;
        if (mode_follower == CEC_MODE_FOLLOWER)
                adap->follower_cnt++;
        if (send_pin_event) {
                struct cec_event ev = {
                        .flags = CEC_EVENT_FL_INITIAL_STATE,
                };

                ev.event = adap->cec_pin_is_high ? CEC_EVENT_PIN_CEC_HIGH :
                                                   CEC_EVENT_PIN_CEC_LOW;
                cec_queue_event_fh(fh, &ev, 0);
        }
        if (mode_follower == CEC_MODE_EXCL_FOLLOWER ||
            mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
                adap->passthrough =
                        mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
                adap->cec_follower = fh;
        } else if (adap->cec_follower == fh) {
                adap->passthrough = false;
                adap->cec_follower = NULL;
        }
        if (mode_initiator == CEC_MODE_EXCL_INITIATOR)
                adap->cec_initiator = fh;
        else if (adap->cec_initiator == fh)
                adap->cec_initiator = NULL;
        fh->mode_initiator = mode_initiator;
        fh->mode_follower = mode_follower;
        mutex_unlock(&adap->lock);
        return 0;
}

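/*
 * Main ioctl handler: dispatch to the CEC_* ioctl handlers above.
 *
 * For illustration only (hypothetical userspace code, not part of this file):
 * a client of this interface would typically do something like
 *
 *      int fd = open("/dev/cec0", O_RDWR);
 *      struct cec_caps caps;
 *      struct cec_msg msg = {};
 *
 *      ioctl(fd, CEC_ADAP_G_CAPS, &caps);
 *      msg.len = 2;
 *      msg.msg[0] = (CEC_LOG_ADDR_PLAYBACK_1 << 4) | CEC_LOG_ADDR_TV;
 *      msg.msg[1] = CEC_MSG_IMAGE_VIEW_ON;
 *      ioctl(fd, CEC_TRANSMIT, &msg);
 *
 * with error handling omitted; see the linux/cec.h uapi header and the
 * CEC uapi documentation for the authoritative description of these ioctls.
 */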
static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct cec_fh *fh = filp->private_data;
        struct cec_adapter *adap = fh->adap;
        bool block = !(filp->f_flags & O_NONBLOCK);
        void __user *parg = (void __user *)arg;

        if (!cec_is_registered(adap))
                return -ENODEV;

        switch (cmd) {
        case CEC_ADAP_G_CAPS:
                return cec_adap_g_caps(adap, parg);

        case CEC_ADAP_G_PHYS_ADDR:
                return cec_adap_g_phys_addr(adap, parg);

        case CEC_ADAP_S_PHYS_ADDR:
                return cec_adap_s_phys_addr(adap, fh, block, parg);

        case CEC_ADAP_G_LOG_ADDRS:
                return cec_adap_g_log_addrs(adap, parg);

        case CEC_ADAP_S_LOG_ADDRS:
                return cec_adap_s_log_addrs(adap, fh, block, parg);

        case CEC_TRANSMIT:
                return cec_transmit(adap, fh, block, parg);

        case CEC_RECEIVE:
                return cec_receive(adap, fh, block, parg);

        case CEC_DQEVENT:
                return cec_dqevent(adap, fh, block, parg);

        case CEC_G_MODE:
                return cec_g_mode(adap, fh, parg);

        case CEC_S_MODE:
                return cec_s_mode(adap, fh, parg);

        default:
                return -ENOTTY;
        }
}

static int cec_open(struct inode *inode, struct file *filp)
{
        struct cec_devnode *devnode =
                container_of(inode->i_cdev, struct cec_devnode, cdev);
        struct cec_adapter *adap = to_cec_adapter(devnode);
        struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
        /*
         * Initial events that are automatically sent when the cec device is
         * opened.
         */
        struct cec_event ev = {
                .event = CEC_EVENT_STATE_CHANGE,
                .flags = CEC_EVENT_FL_INITIAL_STATE,
        };
        unsigned int i;
        int err;

        if (!fh)
                return -ENOMEM;

        INIT_LIST_HEAD(&fh->msgs);
        INIT_LIST_HEAD(&fh->xfer_list);
        for (i = 0; i < CEC_NUM_EVENTS; i++)
                INIT_LIST_HEAD(&fh->events[i]);
        mutex_init(&fh->lock);
        init_waitqueue_head(&fh->wait);

        fh->mode_initiator = CEC_MODE_INITIATOR;
        fh->adap = adap;

        err = cec_get_device(devnode);
        if (err) {
                kfree(fh);
                return err;
        }

        mutex_lock(&devnode->lock);
        if (list_empty(&devnode->fhs) &&
            !adap->needs_hpd &&
            adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
                err = adap->ops->adap_enable(adap, true);
                if (err) {
                        mutex_unlock(&devnode->lock);
                        kfree(fh);
                        return err;
                }
        }
        filp->private_data = fh;

        /* Queue up initial state events */
        ev.state_change.phys_addr = adap->phys_addr;
        ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
        cec_queue_event_fh(fh, &ev, 0);
#ifdef CONFIG_CEC_PIN
        if (adap->pin && adap->pin->ops->read_hpd) {
                err = adap->pin->ops->read_hpd(adap);
                if (err >= 0) {
                        ev.event = err ? CEC_EVENT_PIN_HPD_HIGH :
                                         CEC_EVENT_PIN_HPD_LOW;
                        cec_queue_event_fh(fh, &ev, 0);
                }
        }
#endif

        list_add(&fh->list, &devnode->fhs);
        mutex_unlock(&devnode->lock);

        return 0;
}

/* Override for the release function */
static int cec_release(struct inode *inode, struct file *filp)
{
        struct cec_devnode *devnode = cec_devnode_data(filp);
        struct cec_adapter *adap = to_cec_adapter(devnode);
        struct cec_fh *fh = filp->private_data;
        unsigned int i;

        mutex_lock(&adap->lock);
        if (adap->cec_initiator == fh)
                adap->cec_initiator = NULL;
        if (adap->cec_follower == fh) {
                adap->cec_follower = NULL;
                adap->passthrough = false;
        }
        if (fh->mode_follower == CEC_MODE_FOLLOWER)
                adap->follower_cnt--;
        if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
                cec_monitor_pin_cnt_dec(adap);
        if (fh->mode_follower == CEC_MODE_MONITOR_ALL)
                cec_monitor_all_cnt_dec(adap);
        mutex_unlock(&adap->lock);

        mutex_lock(&devnode->lock);
        list_del(&fh->list);
        if (cec_is_registered(adap) && list_empty(&devnode->fhs) &&
            !adap->needs_hpd && adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
                WARN_ON(adap->ops->adap_enable(adap, false));
        }
        mutex_unlock(&devnode->lock);

        /* Unhook pending transmits from this filehandle. */
        mutex_lock(&adap->lock);
        while (!list_empty(&fh->xfer_list)) {
                struct cec_data *data =
                        list_first_entry(&fh->xfer_list, struct cec_data, xfer_list);

                data->blocking = false;
                data->fh = NULL;
                list_del(&data->xfer_list);
        }
        mutex_unlock(&adap->lock);
        while (!list_empty(&fh->msgs)) {
                struct cec_msg_entry *entry =
                        list_first_entry(&fh->msgs, struct cec_msg_entry, list);

                list_del(&entry->list);
                kfree(entry);
        }
        for (i = CEC_NUM_CORE_EVENTS; i < CEC_NUM_EVENTS; i++) {
                while (!list_empty(&fh->events[i])) {
                        struct cec_event_entry *entry =
                                list_first_entry(&fh->events[i],
                                                 struct cec_event_entry, list);

                        list_del(&entry->list);
                        kfree(entry);
                }
        }
        kfree(fh);

        cec_put_device(devnode);
        filp->private_data = NULL;
        return 0;
}

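/* The file operations for the CEC character device (/dev/cecX) */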
const struct file_operations cec_devnode_fops = {
        .owner = THIS_MODULE,
        .open = cec_open,
        .unlocked_ioctl = cec_ioctl,
        .release = cec_release,
        .poll = cec_poll,
        .llseek = no_llseek,
};