linux/drivers/connector/cn_queue.c
/*
 *	cn_queue.c
 *
 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/suspend.h>
#include <linux/connector.h>
#include <linux/delay.h>

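/*
 * Allocate a callback entry for @id/@name on @dev, take an initial
 * reference on the entry and bump the device refcount.  Returns NULL
 * on allocation failure.
 */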
static struct cn_callback_entry *
cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name,
                              struct cb_id *id,
                              void (*callback)(struct cn_msg *,
                                               struct netlink_skb_parms *))
{
        struct cn_callback_entry *cbq;

        cbq = kzalloc(sizeof(*cbq), GFP_KERNEL);
        if (!cbq) {
                pr_err("Failed to create new callback queue.\n");
                return NULL;
        }

        atomic_set(&cbq->refcnt, 1);

        atomic_inc(&dev->refcnt);
        cbq->pdev = dev;

        snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
        memcpy(&cbq->id.id, id, sizeof(struct cb_id));
        cbq->callback = callback;
        return cbq;
}

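/*
 * Drop a reference on a callback entry; on the final put, release the
 * device reference it holds and free the entry.
 */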
void cn_queue_release_callback(struct cn_callback_entry *cbq)
{
        if (!atomic_dec_and_test(&cbq->refcnt))
                return;

        atomic_dec(&cbq->pdev->refcnt);
        kfree(cbq);
}

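/* Two callback ids are equal when both their idx and val match. */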
int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
{
        return ((i1->idx == i2->idx) && (i1->val == i2->val));
}

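/*
 * Register a new callback on @dev unless an entry with the same cb_id
 * is already on the queue list.  Returns 0 on success, -ENOMEM on
 * allocation failure and -EINVAL if the id is already registered.
 */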
int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
                          struct cb_id *id,
                          void (*callback)(struct cn_msg *,
                                           struct netlink_skb_parms *))
{
        struct cn_callback_entry *cbq, *__cbq;
        int found = 0;

        cbq = cn_queue_alloc_callback_entry(dev, name, id, callback);
        if (!cbq)
                return -ENOMEM;

        spin_lock_bh(&dev->queue_lock);
        list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
                if (cn_cb_equal(&__cbq->id.id, id)) {
                        found = 1;
                        break;
                }
        }
        if (!found)
                list_add_tail(&cbq->callback_entry, &dev->queue_list);
        spin_unlock_bh(&dev->queue_lock);

        if (found) {
                cn_queue_release_callback(cbq);
                return -EINVAL;
        }

        cbq->seq = 0;
        cbq->group = cbq->id.id.idx;

        return 0;
}

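/*
 * Unlink the callback entry matching @id from the queue list and drop
 * the reference taken at registration time.
 */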
void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
{
        struct cn_callback_entry *cbq, *n;
        int found = 0;

        spin_lock_bh(&dev->queue_lock);
        list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
                if (cn_cb_equal(&cbq->id.id, id)) {
                        list_del(&cbq->callback_entry);
                        found = 1;
                        break;
                }
        }
        spin_unlock_bh(&dev->queue_lock);

        if (found)
                cn_queue_release_callback(cbq);
}

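/*
 * Allocate and initialise a queue device bound to the netlink socket @nls.
 */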
struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *nls)
{
        struct cn_queue_dev *dev;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return NULL;

        snprintf(dev->name, sizeof(dev->name), "%s", name);
        atomic_set(&dev->refcnt, 0);
        INIT_LIST_HEAD(&dev->queue_list);
        spin_lock_init(&dev->queue_lock);

        dev->nls = nls;

        return dev;
}

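/*
 * Remove all callback entries from the queue list, wait for outstanding
 * references to be dropped and free the device.
 */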
void cn_queue_free_dev(struct cn_queue_dev *dev)
{
        struct cn_callback_entry *cbq, *n;

        spin_lock_bh(&dev->queue_lock);
        list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
                list_del(&cbq->callback_entry);
        spin_unlock_bh(&dev->queue_lock);

        while (atomic_read(&dev->refcnt)) {
                pr_info("Waiting for %s to become free: refcnt=%d.\n",
                       dev->name, atomic_read(&dev->refcnt));
                msleep(1000);
        }

        kfree(dev);
        dev = NULL;
}

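/*
 * Example (illustrative only, not part of this file): a minimal sketch of a
 * module that registers a connector callback through cn_add_callback(), the
 * exported wrapper that ends up in cn_queue_add_callback() above, and removes
 * it again via cn_del_callback() on exit.  The id values, the "example" name
 * and the callback body are hypothetical.
 *
 *	#include <linux/module.h>
 *	#include <linux/connector.h>
 *
 *	static struct cb_id example_id = {
 *		.idx = CN_NETLINK_USERS + 3,	// hypothetical private index
 *		.val = 0x1,
 *	};
 *
 *	// Called for every cn_msg delivered to example_id.
 *	static void example_callback(struct cn_msg *msg,
 *				     struct netlink_skb_parms *nsp)
 *	{
 *		pr_info("connector message: seq=%u ack=%u len=%u\n",
 *			msg->seq, msg->ack, msg->len);
 *	}
 *
 *	static int __init example_init(void)
 *	{
 *		return cn_add_callback(&example_id, "example", example_callback);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		cn_del_callback(&example_id);
 *	}
 *
 *	module_init(example_init);
 *	module_exit(example_exit);
 *	MODULE_LICENSE("GPL");
 */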