linux/net/rfkill/core.c
/*
 * Copyright (C) 2006 - 2007 Ivo van Doorn
 * Copyright (C) 2007 Dmitry Torokhov
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rfkill.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include "rfkill.h"

#define POLL_INTERVAL           (5 * HZ)

#define RFKILL_BLOCK_HW         BIT(0)
#define RFKILL_BLOCK_SW         BIT(1)
#define RFKILL_BLOCK_SW_PREV    BIT(2)
#define RFKILL_BLOCK_ANY        (RFKILL_BLOCK_HW |\
                                 RFKILL_BLOCK_SW |\
                                 RFKILL_BLOCK_SW_PREV)
#define RFKILL_BLOCK_SW_SETCALL BIT(31)

struct rfkill {
        spinlock_t              lock;

        enum rfkill_type        type;

        unsigned long           state;

        u32                     idx;

        bool                    registered;
        bool                    persistent;
        bool                    polling_paused;
        bool                    suspended;

        const struct rfkill_ops *ops;
        void                    *data;

#ifdef CONFIG_RFKILL_LEDS
        struct led_trigger      led_trigger;
        const char              *ledtrigname;
#endif

        struct device           dev;
        struct list_head        node;

        struct delayed_work     poll_work;
        struct work_struct      uevent_work;
        struct work_struct      sync_work;
        char                    name[];
};
#define to_rfkill(d)    container_of(d, struct rfkill, dev)

struct rfkill_int_event {
        struct list_head        list;
        struct rfkill_event     ev;
};

struct rfkill_data {
        struct list_head        list;
        struct list_head        events;
        struct mutex            mtx;
        wait_queue_head_t       read_wait;
        bool                    input_handler;
};


MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_DESCRIPTION("RF switch support");
MODULE_LICENSE("GPL");


/*
 * The locking here should be made much smarter, we currently have
 * a bit of a stupid situation because drivers might want to register
 * the rfkill struct under their own lock, and take this lock during
 * rfkill method calls -- which will cause an AB-BA deadlock situation.
 *
 * To fix that, we need to rework this code here to be mostly lock-free
 * and only use the mutex for list manipulations, not to protect the
 * various other global variables. Then we can avoid holding the mutex
 * around driver operations, and all is happy.
 */
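
/*
 * A minimal sketch of the AB-BA hazard described above (not part of the
 * kernel source; all foo_* names are hypothetical):
 */
#if 0
static void foo_init(struct foo *foo)
{
        mutex_lock(&foo->lock);                 /* A */
        rfkill_register(foo->rfkill);           /* takes global mutex: A -> B */
        mutex_unlock(&foo->lock);
}

static int foo_set_block(void *data, bool blocked)
{
        struct foo *foo = data;

        /* reached with rfkill_global_mutex held: B -> A */
        mutex_lock(&foo->lock);
        foo_apply_rf_state(foo, blocked);
        mutex_unlock(&foo->lock);
        return 0;
}
#endif
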
static LIST_HEAD(rfkill_list);  /* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static LIST_HEAD(rfkill_fds);   /* list of open fds of /dev/rfkill */

static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
                 "Default initial state for all radio types, 0 = radio off");

static struct {
        bool cur, sav;
} rfkill_global_states[NUM_RFKILL_TYPES];

static bool rfkill_epo_lock_active;


#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
        struct led_trigger *trigger;

        if (!rfkill->registered)
                return;

        trigger = &rfkill->led_trigger;

        if (rfkill->state & RFKILL_BLOCK_ANY)
                led_trigger_event(trigger, LED_OFF);
        else
                led_trigger_event(trigger, LED_FULL);
}

static int rfkill_led_trigger_activate(struct led_classdev *led)
{
        struct rfkill *rfkill;

        rfkill = container_of(led->trigger, struct rfkill, led_trigger);

        rfkill_led_trigger_event(rfkill);

        return 0;
}

const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
        return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);

void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
        BUG_ON(!rfkill);

        rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);

static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
        rfkill->led_trigger.name = rfkill->ledtrigname
                                        ? : dev_name(&rfkill->dev);
        rfkill->led_trigger.activate = rfkill_led_trigger_activate;
        return led_trigger_register(&rfkill->led_trigger);
}

static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
        led_trigger_unregister(&rfkill->led_trigger);
}

static struct led_trigger rfkill_any_led_trigger;
static struct led_trigger rfkill_none_led_trigger;
static struct work_struct rfkill_global_led_trigger_work;

static void rfkill_global_led_trigger_worker(struct work_struct *work)
{
        enum led_brightness brightness = LED_OFF;
        struct rfkill *rfkill;

        mutex_lock(&rfkill_global_mutex);
        list_for_each_entry(rfkill, &rfkill_list, node) {
                if (!(rfkill->state & RFKILL_BLOCK_ANY)) {
                        brightness = LED_FULL;
                        break;
                }
        }
        mutex_unlock(&rfkill_global_mutex);

        led_trigger_event(&rfkill_any_led_trigger, brightness);
        led_trigger_event(&rfkill_none_led_trigger,
                          brightness == LED_OFF ? LED_FULL : LED_OFF);
}

static void rfkill_global_led_trigger_event(void)
{
        schedule_work(&rfkill_global_led_trigger_work);
}

static int rfkill_global_led_trigger_register(void)
{
        int ret;

        INIT_WORK(&rfkill_global_led_trigger_work,
                        rfkill_global_led_trigger_worker);

        rfkill_any_led_trigger.name = "rfkill-any";
        ret = led_trigger_register(&rfkill_any_led_trigger);
        if (ret)
                return ret;

        rfkill_none_led_trigger.name = "rfkill-none";
        ret = led_trigger_register(&rfkill_none_led_trigger);
        if (ret)
                led_trigger_unregister(&rfkill_any_led_trigger);
        else
                /* Delay activation until all global triggers are registered */
                rfkill_global_led_trigger_event();

        return ret;
}

static void rfkill_global_led_trigger_unregister(void)
{
        led_trigger_unregister(&rfkill_none_led_trigger);
        led_trigger_unregister(&rfkill_any_led_trigger);
        cancel_work_sync(&rfkill_global_led_trigger_work);
}
#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
        return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}

static void rfkill_global_led_trigger_event(void)
{
}

static int rfkill_global_led_trigger_register(void)
{
        return 0;
}

static void rfkill_global_led_trigger_unregister(void)
{
}
#endif /* CONFIG_RFKILL_LEDS */

static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
                              enum rfkill_operation op)
{
        unsigned long flags;

        ev->idx = rfkill->idx;
        ev->type = rfkill->type;
        ev->op = op;

        spin_lock_irqsave(&rfkill->lock, flags);
        ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
        ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
                                        RFKILL_BLOCK_SW_PREV));
        spin_unlock_irqrestore(&rfkill->lock, flags);
}

static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
        struct rfkill_data *data;
        struct rfkill_int_event *ev;

        list_for_each_entry(data, &rfkill_fds, list) {
                ev = kzalloc(sizeof(*ev), GFP_KERNEL);
                if (!ev)
                        continue;
                rfkill_fill_event(&ev->ev, rfkill, op);
                mutex_lock(&data->mtx);
                list_add_tail(&ev->list, &data->events);
                mutex_unlock(&data->mtx);
                wake_up_interruptible(&data->read_wait);
        }
}

static void rfkill_event(struct rfkill *rfkill)
{
        if (!rfkill->registered)
                return;

        kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

        /* also send event to /dev/rfkill */
        rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}

/**
 * rfkill_set_block - wrapper for set_block method
 *
 * @rfkill: the rfkill struct to use
 * @blocked: the new software state
 *
 * Calls the set_block method (when applicable) and handles notifications
 * etc. as well.
 */
static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
        unsigned long flags;
        bool prev, curr;
        int err;

        if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
                return;

        /*
         * Some platforms (...!) generate input events which affect the
         * _hard_ kill state -- whenever something tries to change the
         * current software state query the hardware state too.
         */
        if (rfkill->ops->query)
                rfkill->ops->query(rfkill, rfkill->data);

        spin_lock_irqsave(&rfkill->lock, flags);
        prev = rfkill->state & RFKILL_BLOCK_SW;

        if (prev)
                rfkill->state |= RFKILL_BLOCK_SW_PREV;
        else
                rfkill->state &= ~RFKILL_BLOCK_SW_PREV;

        if (blocked)
                rfkill->state |= RFKILL_BLOCK_SW;
        else
                rfkill->state &= ~RFKILL_BLOCK_SW;

        rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
        spin_unlock_irqrestore(&rfkill->lock, flags);

        err = rfkill->ops->set_block(rfkill->data, blocked);

        spin_lock_irqsave(&rfkill->lock, flags);
        if (err) {
                /*
                 * Failed -- reset status to _PREV, which may be different
                 * from what we have set _PREV to earlier in this function
                 * if rfkill_set_sw_state was invoked.
                 */
                if (rfkill->state & RFKILL_BLOCK_SW_PREV)
                        rfkill->state |= RFKILL_BLOCK_SW;
                else
                        rfkill->state &= ~RFKILL_BLOCK_SW;
        }
        rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
        rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
        curr = rfkill->state & RFKILL_BLOCK_SW;
        spin_unlock_irqrestore(&rfkill->lock, flags);

        rfkill_led_trigger_event(rfkill);
        rfkill_global_led_trigger_event();

        if (prev != curr)
                rfkill_event(rfkill);
}
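
/*
 * For reference, the driver side of the wrapper above: set_block should
 * apply the requested software state and return 0, or non-zero to have
 * the previous state restored. A hedged sketch with hypothetical foo_*
 * names, not taken from any in-tree driver:
 */
#if 0
static int foo_rfkill_set_block(void *data, bool blocked)
{
        struct foo *foo = data;

        /* a non-zero return makes rfkill_set_block() roll back to _PREV */
        return foo_write_rf_enable(foo, !blocked);
}
#endif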

static void rfkill_update_global_state(enum rfkill_type type, bool blocked)
{
        int i;

        if (type != RFKILL_TYPE_ALL) {
                rfkill_global_states[type].cur = blocked;
                return;
        }

        for (i = 0; i < NUM_RFKILL_TYPES; i++)
                rfkill_global_states[i].cur = blocked;
}
#ifdef CONFIG_RFKILL_INPUT
static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);

/**
 * __rfkill_switch_all - Set state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * This function sets the state of all switches of the given type,
 * unless a specific switch is suspended.
 *
 * Caller must have acquired rfkill_global_mutex.
 */
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
        struct rfkill *rfkill;

        rfkill_update_global_state(type, blocked);
        list_for_each_entry(rfkill, &rfkill_list, node) {
                if (rfkill->type != type && type != RFKILL_TYPE_ALL)
                        continue;

                rfkill_set_block(rfkill, blocked);
        }
}

/**
 * rfkill_switch_all - Set state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
 * Please refer to __rfkill_switch_all() for details.
 *
 * Does nothing if the EPO lock is active.
 */
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
        if (atomic_read(&rfkill_input_disabled))
                return;

        mutex_lock(&rfkill_global_mutex);

        if (!rfkill_epo_lock_active)
                __rfkill_switch_all(type, blocked);

        mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_epo - emergency power off all transmitters
 *
 * This kicks all non-suspended rfkill devices into the soft-blocked
 * state, ignoring everything in its path except rfkill_global_mutex
 * and the rfkill spinlock.
 *
 * The global state before the EPO is saved and can be restored later
 * using rfkill_restore_states().
 */
void rfkill_epo(void)
{
        struct rfkill *rfkill;
        int i;

        if (atomic_read(&rfkill_input_disabled))
                return;

        mutex_lock(&rfkill_global_mutex);

        rfkill_epo_lock_active = true;
        list_for_each_entry(rfkill, &rfkill_list, node)
                rfkill_set_block(rfkill, true);

        for (i = 0; i < NUM_RFKILL_TYPES; i++) {
                rfkill_global_states[i].sav = rfkill_global_states[i].cur;
                rfkill_global_states[i].cur = true;
        }

        mutex_unlock(&rfkill_global_mutex);
}
/**
 * rfkill_restore_states - restore global states
 *
 * Restore (and sync switches to) the global state from the states
 * saved in rfkill_global_states.  This can undo the effects of
 * a call to rfkill_epo().
 */
void rfkill_restore_states(void)
{
        int i;

        if (atomic_read(&rfkill_input_disabled))
                return;

        mutex_lock(&rfkill_global_mutex);

        rfkill_epo_lock_active = false;
        for (i = 0; i < NUM_RFKILL_TYPES; i++)
                __rfkill_switch_all(i, rfkill_global_states[i].sav);
        mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_remove_epo_lock - unlock state changes
 *
 * Used by rfkill-input to manually unlock state changes, when
 * the EPO switch is deactivated.
 */
void rfkill_remove_epo_lock(void)
{
        if (atomic_read(&rfkill_input_disabled))
                return;

        mutex_lock(&rfkill_global_mutex);
        rfkill_epo_lock_active = false;
        mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_is_epo_lock_active - returns true when EPO is active
 *
 * Returns 0 (false) if there is NOT an active EPO condition,
 * and 1 (true) if there is an active EPO condition, which
 * locks all radios in one of the BLOCKED states.
 *
 * Can be called in atomic context.
 */
bool rfkill_is_epo_lock_active(void)
{
        return rfkill_epo_lock_active;
}

/**
 * rfkill_get_global_sw_state - returns global state for a type
 * @type: the type to get the global state of
 *
 * Returns the current global software state for a given wireless
 * device type.
 */
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
        return rfkill_global_states[type].cur;
}
#endif

bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
        unsigned long flags;
        bool ret, prev;

        BUG_ON(!rfkill);

        spin_lock_irqsave(&rfkill->lock, flags);
        prev = !!(rfkill->state & RFKILL_BLOCK_HW);
        if (blocked)
                rfkill->state |= RFKILL_BLOCK_HW;
        else
                rfkill->state &= ~RFKILL_BLOCK_HW;
        ret = !!(rfkill->state & RFKILL_BLOCK_ANY);
        spin_unlock_irqrestore(&rfkill->lock, flags);

        rfkill_led_trigger_event(rfkill);
        rfkill_global_led_trigger_event();

        if (rfkill->registered && prev != blocked)
                schedule_work(&rfkill->uevent_work);

        return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state);
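
/*
 * Typical caller of rfkill_set_hw_state(): an interrupt handler (or GPIO
 * event) reporting the hardware kill switch; the return value says
 * whether the transmitter must (still) be kept off. Sketch only, with
 * hypothetical foo_* helpers:
 */
#if 0
static irqreturn_t foo_killswitch_irq(int irq, void *dev_id)
{
        struct foo *foo = dev_id;
        bool hw_blocked = foo_read_killswitch_gpio(foo);

        if (rfkill_set_hw_state(foo->rfkill, hw_blocked))
                foo_disable_radio(foo);

        return IRQ_HANDLED;
}
#endif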

static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
        u32 bit = RFKILL_BLOCK_SW;

        /* if inside an ops->set_block call right now, use the other bit */
        if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
                bit = RFKILL_BLOCK_SW_PREV;

        if (blocked)
                rfkill->state |= bit;
        else
                rfkill->state &= ~bit;
}

bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
        unsigned long flags;
        bool prev, hwblock;

        BUG_ON(!rfkill);

        spin_lock_irqsave(&rfkill->lock, flags);
        prev = !!(rfkill->state & RFKILL_BLOCK_SW);
        __rfkill_set_sw_state(rfkill, blocked);
        hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
        blocked = blocked || hwblock;
        spin_unlock_irqrestore(&rfkill->lock, flags);

        if (!rfkill->registered)
                return blocked;

        if (prev != blocked && !hwblock)
                schedule_work(&rfkill->uevent_work);

        rfkill_led_trigger_event(rfkill);
        rfkill_global_led_trigger_event();

        return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);

void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
{
        unsigned long flags;

        BUG_ON(!rfkill);
        BUG_ON(rfkill->registered);

        spin_lock_irqsave(&rfkill->lock, flags);
        __rfkill_set_sw_state(rfkill, blocked);
        rfkill->persistent = true;
        spin_unlock_irqrestore(&rfkill->lock, flags);
}
EXPORT_SYMBOL(rfkill_init_sw_state);

void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
        unsigned long flags;
        bool swprev, hwprev;

        BUG_ON(!rfkill);

        spin_lock_irqsave(&rfkill->lock, flags);

        /*
         * No need to care about prev/setblock ... this is for uevent only
         * and that will get triggered by rfkill_set_block anyway.
         */
        swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
        hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
        __rfkill_set_sw_state(rfkill, sw);
        if (hw)
                rfkill->state |= RFKILL_BLOCK_HW;
        else
                rfkill->state &= ~RFKILL_BLOCK_HW;

        spin_unlock_irqrestore(&rfkill->lock, flags);

        if (!rfkill->registered) {
                rfkill->persistent = true;
        } else {
                if (swprev != sw || hwprev != hw)
                        schedule_work(&rfkill->uevent_work);

                rfkill_led_trigger_event(rfkill);
                rfkill_global_led_trigger_event();
        }
}
EXPORT_SYMBOL(rfkill_set_states);

static const char * const rfkill_types[] = {
        NULL, /* RFKILL_TYPE_ALL */
        "wlan",
        "bluetooth",
        "ultrawideband",
        "wimax",
        "wwan",
        "gps",
        "fm",
        "nfc",
};

enum rfkill_type rfkill_find_type(const char *name)
{
        int i;

        BUILD_BUG_ON(ARRAY_SIZE(rfkill_types) != NUM_RFKILL_TYPES);

        if (!name)
                return RFKILL_TYPE_ALL;

        for (i = 1; i < NUM_RFKILL_TYPES; i++)
                if (!strcmp(name, rfkill_types[i]))
                        return i;
        return RFKILL_TYPE_ALL;
}
EXPORT_SYMBOL(rfkill_find_type);
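
/*
 * rfkill_find_type() maps the strings above back to the enum, with
 * unknown or NULL names falling back to RFKILL_TYPE_ALL. A hedged
 * sketch of a caller (the "radio-type" property name is made up):
 */
#if 0
static enum rfkill_type foo_get_radio_type(struct device *dev)
{
        const char *str = NULL;

        device_property_read_string(dev, "radio-type", &str);
        return rfkill_find_type(str);   /* NULL -> RFKILL_TYPE_ALL */
}
#endif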

static ssize_t name_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%s\n", rfkill->name);
}
static DEVICE_ATTR_RO(name);

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%s\n", rfkill_types[rfkill->type]);
}
static DEVICE_ATTR_RO(type);

static ssize_t index_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%d\n", rfkill->idx);
}
static DEVICE_ATTR_RO(index);

static ssize_t persistent_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%d\n", rfkill->persistent);
}
static DEVICE_ATTR_RO(persistent);

static ssize_t hard_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
}
static DEVICE_ATTR_RO(hard);

static ssize_t soft_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
}

static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
{
        struct rfkill *rfkill = to_rfkill(dev);
        unsigned long state;
        int err;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        err = kstrtoul(buf, 0, &state);
        if (err)
                return err;

        if (state > 1)
                return -EINVAL;

        mutex_lock(&rfkill_global_mutex);
        rfkill_set_block(rfkill, state);
        mutex_unlock(&rfkill_global_mutex);

        return count;
}
static DEVICE_ATTR_RW(soft);

static u8 user_state_from_blocked(unsigned long state)
{
        if (state & RFKILL_BLOCK_HW)
                return RFKILL_USER_STATE_HARD_BLOCKED;
        if (state & RFKILL_BLOCK_SW)
                return RFKILL_USER_STATE_SOFT_BLOCKED;

        return RFKILL_USER_STATE_UNBLOCKED;
}

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
{
        struct rfkill *rfkill = to_rfkill(dev);
        unsigned long state;
        int err;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        err = kstrtoul(buf, 0, &state);
        if (err)
                return err;

        if (state != RFKILL_USER_STATE_SOFT_BLOCKED &&
            state != RFKILL_USER_STATE_UNBLOCKED)
                return -EINVAL;

        mutex_lock(&rfkill_global_mutex);
        rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
        mutex_unlock(&rfkill_global_mutex);

        return count;
}
static DEVICE_ATTR_RW(state);

static struct attribute *rfkill_dev_attrs[] = {
        &dev_attr_name.attr,
        &dev_attr_type.attr,
        &dev_attr_index.attr,
        &dev_attr_persistent.attr,
        &dev_attr_state.attr,
        &dev_attr_soft.attr,
        &dev_attr_hard.attr,
        NULL,
};
ATTRIBUTE_GROUPS(rfkill_dev);

static void rfkill_release(struct device *dev)
{
        struct rfkill *rfkill = to_rfkill(dev);

        kfree(rfkill);
}

static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct rfkill *rfkill = to_rfkill(dev);
        unsigned long flags;
        u32 state;
        int error;

        error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
        if (error)
                return error;
        error = add_uevent_var(env, "RFKILL_TYPE=%s",
                               rfkill_types[rfkill->type]);
        if (error)
                return error;
        spin_lock_irqsave(&rfkill->lock, flags);
        state = rfkill->state;
        spin_unlock_irqrestore(&rfkill->lock, flags);
        error = add_uevent_var(env, "RFKILL_STATE=%d",
                               user_state_from_blocked(state));
        return error;
}

void rfkill_pause_polling(struct rfkill *rfkill)
{
        BUG_ON(!rfkill);

        if (!rfkill->ops->poll)
                return;

        rfkill->polling_paused = true;
        cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);

void rfkill_resume_polling(struct rfkill *rfkill)
{
        BUG_ON(!rfkill);

        if (!rfkill->ops->poll)
                return;

        rfkill->polling_paused = false;

        if (rfkill->suspended)
                return;

        queue_delayed_work(system_power_efficient_wq,
                           &rfkill->poll_work, 0);
}
EXPORT_SYMBOL(rfkill_resume_polling);
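
/*
 * rfkill_pause_polling()/rfkill_resume_polling() let a driver keep the
 * poll worker quiet while the hardware cannot be queried, e.g. around a
 * firmware reload. A minimal sketch, assuming a hypothetical driver:
 */
#if 0
static int foo_reload_firmware(struct foo *foo)
{
        int err;

        rfkill_pause_polling(foo->rfkill);
        err = foo_do_firmware_reload(foo);
        rfkill_resume_polling(foo->rfkill);

        return err;
}
#endif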

#ifdef CONFIG_PM_SLEEP
static int rfkill_suspend(struct device *dev)
{
        struct rfkill *rfkill = to_rfkill(dev);

        rfkill->suspended = true;
        cancel_delayed_work_sync(&rfkill->poll_work);

        return 0;
}

static int rfkill_resume(struct device *dev)
{
        struct rfkill *rfkill = to_rfkill(dev);
        bool cur;

        rfkill->suspended = false;

        if (!rfkill->persistent) {
                cur = !!(rfkill->state & RFKILL_BLOCK_SW);
                rfkill_set_block(rfkill, cur);
        }

        if (rfkill->ops->poll && !rfkill->polling_paused)
                queue_delayed_work(system_power_efficient_wq,
                                   &rfkill->poll_work, 0);

        return 0;
}

static SIMPLE_DEV_PM_OPS(rfkill_pm_ops, rfkill_suspend, rfkill_resume);
#define RFKILL_PM_OPS (&rfkill_pm_ops)
#else
#define RFKILL_PM_OPS NULL
#endif

static struct class rfkill_class = {
        .name           = "rfkill",
        .dev_release    = rfkill_release,
        .dev_groups     = rfkill_dev_groups,
        .dev_uevent     = rfkill_dev_uevent,
        .pm             = RFKILL_PM_OPS,
};

bool rfkill_blocked(struct rfkill *rfkill)
{
        unsigned long flags;
        u32 state;

        spin_lock_irqsave(&rfkill->lock, flags);
        state = rfkill->state;
        spin_unlock_irqrestore(&rfkill->lock, flags);

        return !!(state & RFKILL_BLOCK_ANY);
}
EXPORT_SYMBOL(rfkill_blocked);


struct rfkill * __must_check rfkill_alloc(const char *name,
                                          struct device *parent,
                                          const enum rfkill_type type,
                                          const struct rfkill_ops *ops,
                                          void *ops_data)
{
        struct rfkill *rfkill;
        struct device *dev;

        if (WARN_ON(!ops))
                return NULL;

        if (WARN_ON(!ops->set_block))
                return NULL;

        if (WARN_ON(!name))
                return NULL;

        if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
                return NULL;

        rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL);
        if (!rfkill)
                return NULL;

        spin_lock_init(&rfkill->lock);
        INIT_LIST_HEAD(&rfkill->node);
        rfkill->type = type;
        strcpy(rfkill->name, name);
        rfkill->ops = ops;
        rfkill->data = ops_data;

        dev = &rfkill->dev;
        dev->class = &rfkill_class;
        dev->parent = parent;
        device_initialize(dev);

        return rfkill;
}
EXPORT_SYMBOL(rfkill_alloc);
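
/*
 * The usual driver lifecycle for the API above, as a hedged sketch (all
 * foo_* names are hypothetical): allocate, register, and on teardown or
 * registration failure unregister/destroy in that order.
 */
#if 0
static const struct rfkill_ops foo_rfkill_ops = {
        .set_block = foo_rfkill_set_block,      /* mandatory */
        .poll = foo_rfkill_poll,                /* optional, POLL_INTERVAL */
};

static int foo_probe(struct foo *foo)
{
        int err;

        foo->rfkill = rfkill_alloc("foo-wlan", foo->dev, RFKILL_TYPE_WLAN,
                                   &foo_rfkill_ops, foo);
        if (!foo->rfkill)
                return -ENOMEM;

        err = rfkill_register(foo->rfkill);
        if (err) {
                rfkill_destroy(foo->rfkill);
                return err;
        }

        return 0;
}

static void foo_remove(struct foo *foo)
{
        rfkill_unregister(foo->rfkill);
        rfkill_destroy(foo->rfkill);
}
#endif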

static void rfkill_poll(struct work_struct *work)
{
        struct rfkill *rfkill;

        rfkill = container_of(work, struct rfkill, poll_work.work);

        /*
         * Poll hardware state -- driver will use one of the
         * rfkill_set{,_hw,_sw}_state functions and use its
         * return value to update the current status.
         */
        rfkill->ops->poll(rfkill, rfkill->data);

        queue_delayed_work(system_power_efficient_wq,
                &rfkill->poll_work,
                round_jiffies_relative(POLL_INTERVAL));
}

static void rfkill_uevent_work(struct work_struct *work)
{
        struct rfkill *rfkill;

        rfkill = container_of(work, struct rfkill, uevent_work);

        mutex_lock(&rfkill_global_mutex);
        rfkill_event(rfkill);
        mutex_unlock(&rfkill_global_mutex);
}

static void rfkill_sync_work(struct work_struct *work)
{
        struct rfkill *rfkill;
        bool cur;

        rfkill = container_of(work, struct rfkill, sync_work);

        mutex_lock(&rfkill_global_mutex);
        cur = rfkill_global_states[rfkill->type].cur;
        rfkill_set_block(rfkill, cur);
        mutex_unlock(&rfkill_global_mutex);
}

int __must_check rfkill_register(struct rfkill *rfkill)
{
        static unsigned long rfkill_no;
        struct device *dev = &rfkill->dev;
        int error;

        BUG_ON(!rfkill);

        mutex_lock(&rfkill_global_mutex);

        if (rfkill->registered) {
                error = -EALREADY;
                goto unlock;
        }

        rfkill->idx = rfkill_no;
        dev_set_name(dev, "rfkill%lu", rfkill_no);
        rfkill_no++;

        list_add_tail(&rfkill->node, &rfkill_list);

        error = device_add(dev);
        if (error)
                goto remove;

        error = rfkill_led_trigger_register(rfkill);
        if (error)
                goto devdel;

        rfkill->registered = true;

        INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
        INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
        INIT_WORK(&rfkill->sync_work, rfkill_sync_work);

        if (rfkill->ops->poll)
                queue_delayed_work(system_power_efficient_wq,
                        &rfkill->poll_work,
                        round_jiffies_relative(POLL_INTERVAL));

        if (!rfkill->persistent || rfkill_epo_lock_active) {
                schedule_work(&rfkill->sync_work);
        } else {
#ifdef CONFIG_RFKILL_INPUT
                bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);

                if (!atomic_read(&rfkill_input_disabled))
                        __rfkill_switch_all(rfkill->type, soft_blocked);
#endif
        }

        rfkill_global_led_trigger_event();
        rfkill_send_events(rfkill, RFKILL_OP_ADD);

        mutex_unlock(&rfkill_global_mutex);
        return 0;

 devdel:
        device_del(&rfkill->dev);
 remove:
        list_del_init(&rfkill->node);
 unlock:
        mutex_unlock(&rfkill_global_mutex);
        return error;
}
EXPORT_SYMBOL(rfkill_register);

void rfkill_unregister(struct rfkill *rfkill)
{
        BUG_ON(!rfkill);

        if (rfkill->ops->poll)
                cancel_delayed_work_sync(&rfkill->poll_work);

        cancel_work_sync(&rfkill->uevent_work);
        cancel_work_sync(&rfkill->sync_work);

        rfkill->registered = false;

        device_del(&rfkill->dev);

        mutex_lock(&rfkill_global_mutex);
        rfkill_send_events(rfkill, RFKILL_OP_DEL);
        list_del_init(&rfkill->node);
        rfkill_global_led_trigger_event();
        mutex_unlock(&rfkill_global_mutex);

        rfkill_led_trigger_unregister(rfkill);
}
EXPORT_SYMBOL(rfkill_unregister);

void rfkill_destroy(struct rfkill *rfkill)
{
        if (rfkill)
                put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_destroy);

static int rfkill_fop_open(struct inode *inode, struct file *file)
{
        struct rfkill_data *data;
        struct rfkill *rfkill;
        struct rfkill_int_event *ev, *tmp;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        INIT_LIST_HEAD(&data->events);
        mutex_init(&data->mtx);
        init_waitqueue_head(&data->read_wait);

        mutex_lock(&rfkill_global_mutex);
        mutex_lock(&data->mtx);
        /*
         * start getting events from elsewhere but hold mtx to get
         * startup events added first
         */

        list_for_each_entry(rfkill, &rfkill_list, node) {
                ev = kzalloc(sizeof(*ev), GFP_KERNEL);
                if (!ev)
                        goto free;
                rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
                list_add_tail(&ev->list, &data->events);
        }
        list_add(&data->list, &rfkill_fds);
        mutex_unlock(&data->mtx);
        mutex_unlock(&rfkill_global_mutex);

        file->private_data = data;

        return nonseekable_open(inode, file);

 free:
        mutex_unlock(&data->mtx);
        mutex_unlock(&rfkill_global_mutex);
        mutex_destroy(&data->mtx);
        list_for_each_entry_safe(ev, tmp, &data->events, list)
                kfree(ev);
        kfree(data);
        return -ENOMEM;
}

static __poll_t rfkill_fop_poll(struct file *file, poll_table *wait)
{
        struct rfkill_data *data = file->private_data;
        __poll_t res = EPOLLOUT | EPOLLWRNORM;

        poll_wait(file, &data->read_wait, wait);

        mutex_lock(&data->mtx);
        if (!list_empty(&data->events))
                res = EPOLLIN | EPOLLRDNORM;
        mutex_unlock(&data->mtx);

        return res;
}

static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
                               size_t count, loff_t *pos)
{
        struct rfkill_data *data = file->private_data;
        struct rfkill_int_event *ev;
        unsigned long sz;
        int ret;

        mutex_lock(&data->mtx);

        while (list_empty(&data->events)) {
                if (file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                        goto out;
                }
                mutex_unlock(&data->mtx);
                /* since we re-check and it just compares pointers,
                 * using !list_empty() without locking isn't a problem
                 */
                ret = wait_event_interruptible(data->read_wait,
                                               !list_empty(&data->events));
                mutex_lock(&data->mtx);

                if (ret)
                        goto out;
        }

        ev = list_first_entry(&data->events, struct rfkill_int_event,
                                list);

        sz = min_t(unsigned long, sizeof(ev->ev), count);
        ret = sz;
        if (copy_to_user(buf, &ev->ev, sz))
                ret = -EFAULT;

        list_del(&ev->list);
        kfree(ev);
 out:
        mutex_unlock(&data->mtx);
        return ret;
}
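
/*
 * The matching userspace side: read struct rfkill_event records from
 * /dev/rfkill; short reads down to RFKILL_EVENT_SIZE_V1 keep old
 * clients working. A hedged userspace sketch, not part of this file:
 */
#if 0
#include <linux/rfkill.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct rfkill_event ev;
        int fd = open("/dev/rfkill", O_RDONLY);

        if (fd < 0)
                return 1;
        while (read(fd, &ev, sizeof(ev)) >= RFKILL_EVENT_SIZE_V1)
                printf("idx %u type %u op %u soft %u hard %u\n",
                       ev.idx, ev.type, ev.op, ev.soft, ev.hard);
        close(fd);
        return 0;
}
#endif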

static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *pos)
{
        struct rfkill *rfkill;
        struct rfkill_event ev;
        int ret;

        /* we don't need the 'hard' variable but accept it */
        if (count < RFKILL_EVENT_SIZE_V1 - 1)
                return -EINVAL;

        /*
         * Copy as much data as we can accept into our 'ev' buffer,
         * but tell userspace how much we've copied so it can determine
         * our API version even in a write() call, if it cares.
         */
        count = min(count, sizeof(ev));
        if (copy_from_user(&ev, buf, count))
                return -EFAULT;

        if (ev.type >= NUM_RFKILL_TYPES)
                return -EINVAL;

        mutex_lock(&rfkill_global_mutex);

        switch (ev.op) {
        case RFKILL_OP_CHANGE_ALL:
                rfkill_update_global_state(ev.type, ev.soft);
                list_for_each_entry(rfkill, &rfkill_list, node)
                        if (rfkill->type == ev.type ||
                            ev.type == RFKILL_TYPE_ALL)
                                rfkill_set_block(rfkill, ev.soft);
                ret = 0;
                break;
        case RFKILL_OP_CHANGE:
                list_for_each_entry(rfkill, &rfkill_list, node)
                        if (rfkill->idx == ev.idx &&
                            (rfkill->type == ev.type ||
                             ev.type == RFKILL_TYPE_ALL))
                                rfkill_set_block(rfkill, ev.soft);
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        mutex_unlock(&rfkill_global_mutex);

        return ret ?: count;
}
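
/*
 * And the write direction: userspace submits a struct rfkill_event with
 * RFKILL_OP_CHANGE or RFKILL_OP_CHANGE_ALL. A hedged sketch that
 * soft-blocks every WLAN device (not part of this file):
 */
#if 0
#include <linux/rfkill.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        struct rfkill_event ev;
        int fd = open("/dev/rfkill", O_WRONLY);

        if (fd < 0)
                return 1;
        memset(&ev, 0, sizeof(ev));
        ev.op = RFKILL_OP_CHANGE_ALL;
        ev.type = RFKILL_TYPE_WLAN;
        ev.soft = 1;
        if (write(fd, &ev, sizeof(ev)) < 0)
                return 1;
        close(fd);
        return 0;
}
#endif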

static int rfkill_fop_release(struct inode *inode, struct file *file)
{
        struct rfkill_data *data = file->private_data;
        struct rfkill_int_event *ev, *tmp;

        mutex_lock(&rfkill_global_mutex);
        list_del(&data->list);
        mutex_unlock(&rfkill_global_mutex);

        mutex_destroy(&data->mtx);
        list_for_each_entry_safe(ev, tmp, &data->events, list)
                kfree(ev);

#ifdef CONFIG_RFKILL_INPUT
        if (data->input_handler)
                if (atomic_dec_return(&rfkill_input_disabled) == 0)
                        printk(KERN_DEBUG "rfkill: input handler enabled\n");
#endif

        kfree(data);

        return 0;
}

#ifdef CONFIG_RFKILL_INPUT
static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
{
        struct rfkill_data *data = file->private_data;

        if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
                return -ENOSYS;

        if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
                return -ENOSYS;

        mutex_lock(&data->mtx);

        if (!data->input_handler) {
                if (atomic_inc_return(&rfkill_input_disabled) == 1)
                        printk(KERN_DEBUG "rfkill: input handler disabled\n");
                data->input_handler = true;
        }

        mutex_unlock(&data->mtx);

        return 0;
}
#endif

static const struct file_operations rfkill_fops = {
        .owner          = THIS_MODULE,
        .open           = rfkill_fop_open,
        .read           = rfkill_fop_read,
        .write          = rfkill_fop_write,
        .poll           = rfkill_fop_poll,
        .release        = rfkill_fop_release,
#ifdef CONFIG_RFKILL_INPUT
        .unlocked_ioctl = rfkill_fop_ioctl,
        .compat_ioctl   = rfkill_fop_ioctl,
#endif
        .llseek         = no_llseek,
};

static struct miscdevice rfkill_miscdev = {
        .name   = "rfkill",
        .fops   = &rfkill_fops,
        .minor  = MISC_DYNAMIC_MINOR,
};

static int __init rfkill_init(void)
{
        int error;

        rfkill_update_global_state(RFKILL_TYPE_ALL, !rfkill_default_state);

        error = class_register(&rfkill_class);
        if (error)
                goto error_class;

        error = misc_register(&rfkill_miscdev);
        if (error)
                goto error_misc;

        error = rfkill_global_led_trigger_register();
        if (error)
                goto error_led_trigger;

#ifdef CONFIG_RFKILL_INPUT
        error = rfkill_handler_init();
        if (error)
                goto error_input;
#endif

        return 0;

#ifdef CONFIG_RFKILL_INPUT
error_input:
        rfkill_global_led_trigger_unregister();
#endif
error_led_trigger:
        misc_deregister(&rfkill_miscdev);
error_misc:
        class_unregister(&rfkill_class);
error_class:
        return error;
}
subsys_initcall(rfkill_init);

static void __exit rfkill_exit(void)
{
#ifdef CONFIG_RFKILL_INPUT
        rfkill_handler_exit();
#endif
        rfkill_global_led_trigger_unregister();
        misc_deregister(&rfkill_miscdev);
        class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);