linux/net/rfkill/core.c
/*
 * Copyright (C) 2006 - 2007 Ivo van Doorn
 * Copyright (C) 2007 Dmitry Torokhov
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rfkill.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include "rfkill.h"

#define POLL_INTERVAL           (5 * HZ)

#define RFKILL_BLOCK_HW         BIT(0)
#define RFKILL_BLOCK_SW         BIT(1)
#define RFKILL_BLOCK_SW_PREV    BIT(2)
#define RFKILL_BLOCK_ANY        (RFKILL_BLOCK_HW |\
                                 RFKILL_BLOCK_SW |\
                                 RFKILL_BLOCK_SW_PREV)
#define RFKILL_BLOCK_SW_SETCALL BIT(31)

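/*
 * Added note (not in the original file): rfkill->state packs all block
 * flags into one word.  _HW and _SW are the current hard/soft block
 * states; _SW_PREV remembers the pre-call soft state so that a failed
 * ops->set_block() can be rolled back; _SW_SETCALL marks that an
 * ops->set_block() call is in flight, so a concurrent
 * rfkill_set_sw_state() stores its value in _SW_PREV rather than
 * fighting the in-flight call.
 */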
struct rfkill {
        spinlock_t              lock;

        const char              *name;
        enum rfkill_type        type;

        unsigned long           state;

        u32                     idx;

        bool                    registered;
        bool                    persistent;

        const struct rfkill_ops *ops;
        void                    *data;

#ifdef CONFIG_RFKILL_LEDS
        struct led_trigger      led_trigger;
        const char              *ledtrigname;
#endif

        struct device           dev;
        struct list_head        node;

        struct delayed_work     poll_work;
        struct work_struct      uevent_work;
        struct work_struct      sync_work;
};
#define to_rfkill(d)    container_of(d, struct rfkill, dev)

struct rfkill_int_event {
        struct list_head        list;
        struct rfkill_event     ev;
};

struct rfkill_data {
        struct list_head        list;
        struct list_head        events;
        struct mutex            mtx;
        wait_queue_head_t       read_wait;
        bool                    input_handler;
};


MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_DESCRIPTION("RF switch support");
MODULE_LICENSE("GPL");


/*
 * The locking here should be made much smarter, we currently have
 * a bit of a stupid situation because drivers might want to register
 * the rfkill struct under their own lock, and take this lock during
 * rfkill method calls -- which will cause an AB-BA deadlock situation.
 *
 * To fix that, we need to rework this code here to be mostly lock-free
 * and only use the mutex for list manipulations, not to protect the
 * various other global variables. Then we can avoid holding the mutex
 * around driver operations, and all is happy.
 */
static LIST_HEAD(rfkill_list);  /* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static LIST_HEAD(rfkill_fds);   /* list of open fds of /dev/rfkill */

static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
                 "Default initial state for all radio types, 0 = radio off");
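/*
 * Added note (not in the original file): with rfkill built as a module
 * this is "modprobe rfkill default_state=0"; built in, it becomes the
 * boot parameter "rfkill.default_state=0".  Radios default to
 * unblocked (1) unless this is cleared.
 */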

static struct {
        bool cur, sav;
} rfkill_global_states[NUM_RFKILL_TYPES];

static bool rfkill_epo_lock_active;


#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
        struct led_trigger *trigger;

        if (!rfkill->registered)
                return;

        trigger = &rfkill->led_trigger;

        if (rfkill->state & RFKILL_BLOCK_ANY)
                led_trigger_event(trigger, LED_OFF);
        else
                led_trigger_event(trigger, LED_FULL);
}

static void rfkill_led_trigger_activate(struct led_classdev *led)
{
        struct rfkill *rfkill;

        rfkill = container_of(led->trigger, struct rfkill, led_trigger);

        rfkill_led_trigger_event(rfkill);
}

const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
        return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);

void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
        BUG_ON(!rfkill);

        rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);

static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
        rfkill->led_trigger.name = rfkill->ledtrigname
                                        ? : dev_name(&rfkill->dev);
        rfkill->led_trigger.activate = rfkill_led_trigger_activate;
        return led_trigger_register(&rfkill->led_trigger);
}

static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
        led_trigger_unregister(&rfkill->led_trigger);
}
#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
        return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}
#endif /* CONFIG_RFKILL_LEDS */

static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
                              enum rfkill_operation op)
{
        unsigned long flags;

        ev->idx = rfkill->idx;
        ev->type = rfkill->type;
        ev->op = op;

        spin_lock_irqsave(&rfkill->lock, flags);
        ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
        ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
                                        RFKILL_BLOCK_SW_PREV));
        spin_unlock_irqrestore(&rfkill->lock, flags);
}

static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
        struct rfkill_data *data;
        struct rfkill_int_event *ev;

        list_for_each_entry(data, &rfkill_fds, list) {
                ev = kzalloc(sizeof(*ev), GFP_KERNEL);
                if (!ev)
                        continue;
                rfkill_fill_event(&ev->ev, rfkill, op);
                mutex_lock(&data->mtx);
                list_add_tail(&ev->list, &data->events);
                mutex_unlock(&data->mtx);
                wake_up_interruptible(&data->read_wait);
        }
}

static void rfkill_event(struct rfkill *rfkill)
{
        if (!rfkill->registered)
                return;

        kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

        /* also send event to /dev/rfkill */
        rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}

static bool __rfkill_set_hw_state(struct rfkill *rfkill,
                                  bool blocked, bool *change)
{
        unsigned long flags;
        bool prev, any;

        BUG_ON(!rfkill);

        spin_lock_irqsave(&rfkill->lock, flags);
        prev = !!(rfkill->state & RFKILL_BLOCK_HW);
        if (blocked)
                rfkill->state |= RFKILL_BLOCK_HW;
        else
                rfkill->state &= ~RFKILL_BLOCK_HW;
        *change = prev != blocked;
        any = !!(rfkill->state & RFKILL_BLOCK_ANY);
        spin_unlock_irqrestore(&rfkill->lock, flags);

        rfkill_led_trigger_event(rfkill);

        return any;
}

/**
 * rfkill_set_block - wrapper for set_block method
 *
 * @rfkill: the rfkill struct to use
 * @blocked: the new software state
 *
 * Calls the set_block method (when applicable) and handles notifications
 * etc. as well.
 */
static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
        unsigned long flags;
        bool prev, curr;
        int err;

        if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
                return;

        /*
         * Some platforms (...!) generate input events which affect the
         * _hard_ kill state -- whenever something tries to change the
         * current software state query the hardware state too.
         */
        if (rfkill->ops->query)
                rfkill->ops->query(rfkill, rfkill->data);

        spin_lock_irqsave(&rfkill->lock, flags);
        prev = rfkill->state & RFKILL_BLOCK_SW;

        if (rfkill->state & RFKILL_BLOCK_SW)
                rfkill->state |= RFKILL_BLOCK_SW_PREV;
        else
                rfkill->state &= ~RFKILL_BLOCK_SW_PREV;

        if (blocked)
                rfkill->state |= RFKILL_BLOCK_SW;
        else
                rfkill->state &= ~RFKILL_BLOCK_SW;

        rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
        spin_unlock_irqrestore(&rfkill->lock, flags);

        err = rfkill->ops->set_block(rfkill->data, blocked);

        spin_lock_irqsave(&rfkill->lock, flags);
        if (err) {
                /*
                 * Failed -- reset status to _prev, this may be different
                 * from what we set _PREV to earlier in this function
                 * if rfkill_set_sw_state was invoked.
                 */
                if (rfkill->state & RFKILL_BLOCK_SW_PREV)
                        rfkill->state |= RFKILL_BLOCK_SW;
                else
                        rfkill->state &= ~RFKILL_BLOCK_SW;
        }
        rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
        rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
        curr = rfkill->state & RFKILL_BLOCK_SW;
        spin_unlock_irqrestore(&rfkill->lock, flags);

        rfkill_led_trigger_event(rfkill);

        if (prev != curr)
                rfkill_event(rfkill);
}

#ifdef CONFIG_RFKILL_INPUT
static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);

/**
 * __rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * This function sets the state of all switches of given type,
 * unless a specific switch is claimed by userspace (in which case,
 * that switch is left alone) or suspended.
 *
 * Caller must have acquired rfkill_global_mutex.
 */
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
        struct rfkill *rfkill;

        rfkill_global_states[type].cur = blocked;
        list_for_each_entry(rfkill, &rfkill_list, node) {
                if (rfkill->type != type && type != RFKILL_TYPE_ALL)
                        continue;

                rfkill_set_block(rfkill, blocked);
        }
}

/**
 * rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
 * Please refer to __rfkill_switch_all() for details.
 *
 * Does nothing if the EPO lock is active.
 */
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
        if (atomic_read(&rfkill_input_disabled))
                return;

        mutex_lock(&rfkill_global_mutex);

        if (!rfkill_epo_lock_active)
                __rfkill_switch_all(type, blocked);

        mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_epo - emergency power off all transmitters
 *
 * This kicks all non-suspended rfkill devices into the soft-blocked
 * state, ignoring everything in its path but rfkill_global_mutex.
 *
 * The global state before the EPO is saved and can be restored later
 * using rfkill_restore_states().
 */
void rfkill_epo(void)
{
        struct rfkill *rfkill;
        int i;

        if (atomic_read(&rfkill_input_disabled))
                return;

        mutex_lock(&rfkill_global_mutex);

        rfkill_epo_lock_active = true;
        list_for_each_entry(rfkill, &rfkill_list, node)
                rfkill_set_block(rfkill, true);

        for (i = 0; i < NUM_RFKILL_TYPES; i++) {
                rfkill_global_states[i].sav = rfkill_global_states[i].cur;
                rfkill_global_states[i].cur = true;
        }

        mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_restore_states - restore global states
 *
 * Restore (and sync switches to) the global state from the
 * states saved in rfkill_global_states.  This can undo the effects
 * of a call to rfkill_epo().
 */
void rfkill_restore_states(void)
{
        int i;

        if (atomic_read(&rfkill_input_disabled))
                return;

        mutex_lock(&rfkill_global_mutex);

        rfkill_epo_lock_active = false;
        for (i = 0; i < NUM_RFKILL_TYPES; i++)
                __rfkill_switch_all(i, rfkill_global_states[i].sav);
        mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_remove_epo_lock - unlock state changes
 *
 * Used by rfkill-input to manually unlock state changes, when
 * the EPO switch is deactivated.
 */
void rfkill_remove_epo_lock(void)
{
        if (atomic_read(&rfkill_input_disabled))
                return;

        mutex_lock(&rfkill_global_mutex);
        rfkill_epo_lock_active = false;
        mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_is_epo_lock_active - returns true if EPO is active
 *
 * Returns 0 (false) if there is NOT an active EPO condition,
 * and 1 (true) if there is an active EPO condition, which
 * locks all radios in one of the BLOCKED states.
 *
 * Can be called in atomic context.
 */
bool rfkill_is_epo_lock_active(void)
{
        return rfkill_epo_lock_active;
}

/**
 * rfkill_get_global_sw_state - returns global state for a type
 * @type: the type to get the global state of
 *
 * Returns the current global state for a given wireless
 * device type.
 */
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
        return rfkill_global_states[type].cur;
}
#endif


bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
        bool ret, change;

        ret = __rfkill_set_hw_state(rfkill, blocked, &change);

        if (!rfkill->registered)
                return ret;

        if (change)
                schedule_work(&rfkill->uevent_work);

        return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state);
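
/*
 * Added illustrative sketch (not part of the original file): a driver
 * with a GPIO kill switch might report the hardware state from its
 * interrupt handler.  "my_dev", "my_read_killswitch_gpio" and the irq
 * wiring are hypothetical.  This is usable from IRQ context, since
 * rfkill_set_hw_state() only takes rfkill->lock with irqsave and
 * defers the uevent to a work item.
 */
#if 0
static irqreturn_t my_killswitch_irq(int irq, void *arg)
{
        struct my_dev *dev = arg;
        bool blocked = my_read_killswitch_gpio(dev);

        /* Returns true if the radio should stay off for any reason. */
        rfkill_set_hw_state(dev->rfkill, blocked);

        return IRQ_HANDLED;
}
#endif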

static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
        u32 bit = RFKILL_BLOCK_SW;

        /* if in an ops->set_block call right now, use the other bit */
        if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
                bit = RFKILL_BLOCK_SW_PREV;

        if (blocked)
                rfkill->state |= bit;
        else
                rfkill->state &= ~bit;
}

bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
        unsigned long flags;
        bool prev, hwblock;

        BUG_ON(!rfkill);

        spin_lock_irqsave(&rfkill->lock, flags);
        prev = !!(rfkill->state & RFKILL_BLOCK_SW);
        __rfkill_set_sw_state(rfkill, blocked);
        hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
        blocked = blocked || hwblock;
        spin_unlock_irqrestore(&rfkill->lock, flags);

        if (!rfkill->registered)
                return blocked;

        if (prev != blocked && !hwblock)
                schedule_work(&rfkill->uevent_work);

        rfkill_led_trigger_event(rfkill);

        return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);
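
/*
 * Added illustrative sketch (not part of the original file): a driver
 * mirroring a firmware-initiated soft block.  "my_dev",
 * "my_fw_rfkill_notify" and "my_radio_power_off" are hypothetical.
 * The return value is the combined (sw || hw) block state the driver
 * should apply to its hardware.
 */
#if 0
static void my_fw_rfkill_notify(struct my_dev *dev, bool fw_blocked)
{
        if (rfkill_set_sw_state(dev->rfkill, fw_blocked))
                my_radio_power_off(dev);
}
#endif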

void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
{
        unsigned long flags;

        BUG_ON(!rfkill);
        BUG_ON(rfkill->registered);

        spin_lock_irqsave(&rfkill->lock, flags);
        __rfkill_set_sw_state(rfkill, blocked);
        rfkill->persistent = true;
        spin_unlock_irqrestore(&rfkill->lock, flags);
}
EXPORT_SYMBOL(rfkill_init_sw_state);

void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
        unsigned long flags;
        bool swprev, hwprev;

        BUG_ON(!rfkill);

        spin_lock_irqsave(&rfkill->lock, flags);

        /*
         * No need to care about prev/setblock ... this is for uevent only
         * and that will get triggered by rfkill_set_block anyway.
         */
        swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
        hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
        __rfkill_set_sw_state(rfkill, sw);
        if (hw)
                rfkill->state |= RFKILL_BLOCK_HW;
        else
                rfkill->state &= ~RFKILL_BLOCK_HW;

        spin_unlock_irqrestore(&rfkill->lock, flags);

        if (!rfkill->registered) {
                rfkill->persistent = true;
        } else {
                if (swprev != sw || hwprev != hw)
                        schedule_work(&rfkill->uevent_work);

                rfkill_led_trigger_event(rfkill);
        }
}
EXPORT_SYMBOL(rfkill_set_states);

static ssize_t rfkill_name_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%s\n", rfkill->name);
}

static const char *rfkill_get_type_str(enum rfkill_type type)
{
        BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_NFC + 1);

        switch (type) {
        case RFKILL_TYPE_WLAN:
                return "wlan";
        case RFKILL_TYPE_BLUETOOTH:
                return "bluetooth";
        case RFKILL_TYPE_UWB:
                return "ultrawideband";
        case RFKILL_TYPE_WIMAX:
                return "wimax";
        case RFKILL_TYPE_WWAN:
                return "wwan";
        case RFKILL_TYPE_GPS:
                return "gps";
        case RFKILL_TYPE_FM:
                return "fm";
        case RFKILL_TYPE_NFC:
                return "nfc";
        default:
                BUG();
        }
}

static ssize_t rfkill_type_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
}

static ssize_t rfkill_idx_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%d\n", rfkill->idx);
}

static ssize_t rfkill_persistent_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%d\n", rfkill->persistent);
}

static ssize_t rfkill_hard_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
}

static ssize_t rfkill_soft_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
}

static ssize_t rfkill_soft_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct rfkill *rfkill = to_rfkill(dev);
        unsigned long state;
        int err;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        err = kstrtoul(buf, 0, &state);
        if (err)
                return err;

        if (state > 1)
                return -EINVAL;

        mutex_lock(&rfkill_global_mutex);
        rfkill_set_block(rfkill, state);
        mutex_unlock(&rfkill_global_mutex);

        return count;
}

static u8 user_state_from_blocked(unsigned long state)
{
        if (state & RFKILL_BLOCK_HW)
                return RFKILL_USER_STATE_HARD_BLOCKED;
        if (state & RFKILL_BLOCK_SW)
                return RFKILL_USER_STATE_SOFT_BLOCKED;

        return RFKILL_USER_STATE_UNBLOCKED;
}

static ssize_t rfkill_state_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
}

static ssize_t rfkill_state_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct rfkill *rfkill = to_rfkill(dev);
        unsigned long state;
        int err;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        err = kstrtoul(buf, 0, &state);
        if (err)
                return err;

        if (state != RFKILL_USER_STATE_SOFT_BLOCKED &&
            state != RFKILL_USER_STATE_UNBLOCKED)
                return -EINVAL;

        mutex_lock(&rfkill_global_mutex);
        rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
        mutex_unlock(&rfkill_global_mutex);

        return count;
}

static ssize_t rfkill_claim_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        return sprintf(buf, "%d\n", 0);
}

static ssize_t rfkill_claim_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        return -EOPNOTSUPP;
}

static struct device_attribute rfkill_dev_attrs[] = {
        __ATTR(name, S_IRUGO, rfkill_name_show, NULL),
        __ATTR(type, S_IRUGO, rfkill_type_show, NULL),
        __ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
        __ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL),
        __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
        __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
        __ATTR(soft, S_IRUGO|S_IWUSR, rfkill_soft_show, rfkill_soft_store),
        __ATTR(hard, S_IRUGO, rfkill_hard_show, NULL),
        __ATTR_NULL
};
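
/*
 * Added note (not in the original file): the attributes above appear
 * under /sys/class/rfkill/rfkill<N>/, e.g.
 *
 *      cat /sys/class/rfkill/rfkill0/{name,type,soft,hard}
 *      echo 1 > /sys/class/rfkill/rfkill0/soft        # soft-block
 *
 * Writing "soft" or "state" requires CAP_NET_ADMIN; "claim" is a
 * compatibility stub that always reads 0 and rejects writes.
 */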

static void rfkill_release(struct device *dev)
{
        struct rfkill *rfkill = to_rfkill(dev);

        kfree(rfkill);
}

static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct rfkill *rfkill = to_rfkill(dev);
        unsigned long flags;
        u32 state;
        int error;

        error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
        if (error)
                return error;
        error = add_uevent_var(env, "RFKILL_TYPE=%s",
                               rfkill_get_type_str(rfkill->type));
        if (error)
                return error;
        spin_lock_irqsave(&rfkill->lock, flags);
        state = rfkill->state;
        spin_unlock_irqrestore(&rfkill->lock, flags);
        error = add_uevent_var(env, "RFKILL_STATE=%d",
                               user_state_from_blocked(state));
        return error;
}

void rfkill_pause_polling(struct rfkill *rfkill)
{
        BUG_ON(!rfkill);

        if (!rfkill->ops->poll)
                return;

        cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);

void rfkill_resume_polling(struct rfkill *rfkill)
{
        BUG_ON(!rfkill);

        if (!rfkill->ops->poll)
                return;

        schedule_work(&rfkill->poll_work.work);
}
EXPORT_SYMBOL(rfkill_resume_polling);

static int rfkill_suspend(struct device *dev, pm_message_t state)
{
        struct rfkill *rfkill = to_rfkill(dev);

        rfkill_pause_polling(rfkill);

        return 0;
}

static int rfkill_resume(struct device *dev)
{
        struct rfkill *rfkill = to_rfkill(dev);
        bool cur;

        if (!rfkill->persistent) {
                cur = !!(rfkill->state & RFKILL_BLOCK_SW);
                rfkill_set_block(rfkill, cur);
        }

        rfkill_resume_polling(rfkill);

        return 0;
}

static struct class rfkill_class = {
        .name           = "rfkill",
        .dev_release    = rfkill_release,
        .dev_attrs      = rfkill_dev_attrs,
        .dev_uevent     = rfkill_dev_uevent,
        .suspend        = rfkill_suspend,
        .resume         = rfkill_resume,
};

bool rfkill_blocked(struct rfkill *rfkill)
{
        unsigned long flags;
        u32 state;

        spin_lock_irqsave(&rfkill->lock, flags);
        state = rfkill->state;
        spin_unlock_irqrestore(&rfkill->lock, flags);

        return !!(state & RFKILL_BLOCK_ANY);
}
EXPORT_SYMBOL(rfkill_blocked);


struct rfkill * __must_check rfkill_alloc(const char *name,
                                          struct device *parent,
                                          const enum rfkill_type type,
                                          const struct rfkill_ops *ops,
                                          void *ops_data)
{
        struct rfkill *rfkill;
        struct device *dev;

        if (WARN_ON(!ops))
                return NULL;

        if (WARN_ON(!ops->set_block))
                return NULL;

        if (WARN_ON(!name))
                return NULL;

        if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
                return NULL;

        rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
        if (!rfkill)
                return NULL;

        spin_lock_init(&rfkill->lock);
        INIT_LIST_HEAD(&rfkill->node);
        rfkill->type = type;
        rfkill->name = name;
        rfkill->ops = ops;
        rfkill->data = ops_data;

        dev = &rfkill->dev;
        dev->class = &rfkill_class;
        dev->parent = parent;
        device_initialize(dev);

        return rfkill;
}
EXPORT_SYMBOL(rfkill_alloc);
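
/*
 * Added illustrative sketch (not part of the original file): typical
 * driver-side usage of the allocation/registration API.  "my_dev",
 * "my_radio_set_power" and the probe wiring are hypothetical.
 */
#if 0
static int my_set_block(void *data, bool blocked)
{
        struct my_dev *dev = data;

        return my_radio_set_power(dev, !blocked);
}

static const struct rfkill_ops my_rfkill_ops = {
        .set_block = my_set_block,
};

static int my_probe(struct my_dev *dev)
{
        int err;

        dev->rfkill = rfkill_alloc("my-wlan", dev->parent_dev,
                                   RFKILL_TYPE_WLAN, &my_rfkill_ops, dev);
        if (!dev->rfkill)
                return -ENOMEM;

        /* Optional (CONFIG_RFKILL_LEDS): name the LED trigger before
         * registering; otherwise it defaults to the device name. */
        rfkill_set_led_trigger_name(dev->rfkill, "my-wlan-led");

        err = rfkill_register(dev->rfkill);
        if (err) {
                rfkill_destroy(dev->rfkill);
                return err;
        }
        return 0;
}
#endif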

static void rfkill_poll(struct work_struct *work)
{
        struct rfkill *rfkill;

        rfkill = container_of(work, struct rfkill, poll_work.work);

        /*
         * Poll hardware state -- driver will use one of the
         * rfkill_set{,_hw,_sw}_state functions and use its
         * return value to update the current status.
         */
        rfkill->ops->poll(rfkill, rfkill->data);

        schedule_delayed_work(&rfkill->poll_work,
                round_jiffies_relative(POLL_INTERVAL));
}

static void rfkill_uevent_work(struct work_struct *work)
{
        struct rfkill *rfkill;

        rfkill = container_of(work, struct rfkill, uevent_work);

        mutex_lock(&rfkill_global_mutex);
        rfkill_event(rfkill);
        mutex_unlock(&rfkill_global_mutex);
}

static void rfkill_sync_work(struct work_struct *work)
{
        struct rfkill *rfkill;
        bool cur;

        rfkill = container_of(work, struct rfkill, sync_work);

        mutex_lock(&rfkill_global_mutex);
        cur = rfkill_global_states[rfkill->type].cur;
        rfkill_set_block(rfkill, cur);
        mutex_unlock(&rfkill_global_mutex);
}

int __must_check rfkill_register(struct rfkill *rfkill)
{
        static unsigned long rfkill_no;
        struct device *dev = &rfkill->dev;
        int error;

        BUG_ON(!rfkill);

        mutex_lock(&rfkill_global_mutex);

        if (rfkill->registered) {
                error = -EALREADY;
                goto unlock;
        }

        rfkill->idx = rfkill_no;
        dev_set_name(dev, "rfkill%lu", rfkill_no);
        rfkill_no++;

        list_add_tail(&rfkill->node, &rfkill_list);

        error = device_add(dev);
        if (error)
                goto remove;

        error = rfkill_led_trigger_register(rfkill);
        if (error)
                goto devdel;

        rfkill->registered = true;

        INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
        INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
        INIT_WORK(&rfkill->sync_work, rfkill_sync_work);

        if (rfkill->ops->poll)
                schedule_delayed_work(&rfkill->poll_work,
                        round_jiffies_relative(POLL_INTERVAL));

        if (!rfkill->persistent || rfkill_epo_lock_active) {
                schedule_work(&rfkill->sync_work);
        } else {
#ifdef CONFIG_RFKILL_INPUT
                bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);

                if (!atomic_read(&rfkill_input_disabled))
                        __rfkill_switch_all(rfkill->type, soft_blocked);
#endif
        }

        rfkill_send_events(rfkill, RFKILL_OP_ADD);

        mutex_unlock(&rfkill_global_mutex);
        return 0;

 devdel:
        device_del(&rfkill->dev);
 remove:
        list_del_init(&rfkill->node);
 unlock:
        mutex_unlock(&rfkill_global_mutex);
        return error;
}
EXPORT_SYMBOL(rfkill_register);

void rfkill_unregister(struct rfkill *rfkill)
{
        BUG_ON(!rfkill);

        if (rfkill->ops->poll)
                cancel_delayed_work_sync(&rfkill->poll_work);

        cancel_work_sync(&rfkill->uevent_work);
        cancel_work_sync(&rfkill->sync_work);

        rfkill->registered = false;

        device_del(&rfkill->dev);

        mutex_lock(&rfkill_global_mutex);
        rfkill_send_events(rfkill, RFKILL_OP_DEL);
        list_del_init(&rfkill->node);
        mutex_unlock(&rfkill_global_mutex);

        rfkill_led_trigger_unregister(rfkill);
}
EXPORT_SYMBOL(rfkill_unregister);

void rfkill_destroy(struct rfkill *rfkill)
{
        if (rfkill)
                put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_destroy);

static int rfkill_fop_open(struct inode *inode, struct file *file)
{
        struct rfkill_data *data;
        struct rfkill *rfkill;
        struct rfkill_int_event *ev, *tmp;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        INIT_LIST_HEAD(&data->events);
        mutex_init(&data->mtx);
        init_waitqueue_head(&data->read_wait);

        mutex_lock(&rfkill_global_mutex);
        mutex_lock(&data->mtx);
        /*
         * start getting events from elsewhere but hold mtx to get
         * startup events added first
         */

        list_for_each_entry(rfkill, &rfkill_list, node) {
                ev = kzalloc(sizeof(*ev), GFP_KERNEL);
                if (!ev)
                        goto free;
                rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
                list_add_tail(&ev->list, &data->events);
        }
        list_add(&data->list, &rfkill_fds);
        mutex_unlock(&data->mtx);
        mutex_unlock(&rfkill_global_mutex);

        file->private_data = data;

        return nonseekable_open(inode, file);

 free:
        mutex_unlock(&data->mtx);
        mutex_unlock(&rfkill_global_mutex);
        mutex_destroy(&data->mtx);
        list_for_each_entry_safe(ev, tmp, &data->events, list)
                kfree(ev);
        kfree(data);
        return -ENOMEM;
}

static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
{
        struct rfkill_data *data = file->private_data;
        unsigned int res = POLLOUT | POLLWRNORM;

        poll_wait(file, &data->read_wait, wait);

        mutex_lock(&data->mtx);
        if (!list_empty(&data->events))
                res = POLLIN | POLLRDNORM;
        mutex_unlock(&data->mtx);

        return res;
}

static bool rfkill_readable(struct rfkill_data *data)
{
        bool r;

        mutex_lock(&data->mtx);
        r = !list_empty(&data->events);
        mutex_unlock(&data->mtx);

        return r;
}

static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
                               size_t count, loff_t *pos)
{
        struct rfkill_data *data = file->private_data;
        struct rfkill_int_event *ev;
        unsigned long sz;
        int ret;

        mutex_lock(&data->mtx);

        while (list_empty(&data->events)) {
                if (file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                        goto out;
                }
                mutex_unlock(&data->mtx);
                ret = wait_event_interruptible(data->read_wait,
                                               rfkill_readable(data));
                mutex_lock(&data->mtx);

                if (ret)
                        goto out;
        }

        ev = list_first_entry(&data->events, struct rfkill_int_event,
                                list);

        sz = min_t(unsigned long, sizeof(ev->ev), count);
        ret = sz;
        if (copy_to_user(buf, &ev->ev, sz))
                ret = -EFAULT;

        list_del(&ev->list);
        kfree(ev);
 out:
        mutex_unlock(&data->mtx);
        return ret;
}
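
/*
 * Added illustrative sketch (not part of the original file): a
 * userspace (not kernel) reader for /dev/rfkill.  Each read() returns
 * one struct rfkill_event from <linux/rfkill.h>; the first reads
 * replay one RFKILL_OP_ADD per existing device, after which read()
 * blocks waiting for live events.
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/rfkill.h>

int dump_rfkill_events(void)
{
        struct rfkill_event ev;
        int fd = open("/dev/rfkill", O_RDONLY);

        if (fd < 0)
                return -1;

        while (read(fd, &ev, sizeof(ev)) >= RFKILL_EVENT_SIZE_V1)
                printf("idx %u type %u op %u soft %u hard %u\n",
                       ev.idx, ev.type, ev.op, ev.soft, ev.hard);

        close(fd);
        return 0;
}
#endif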

static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *pos)
{
        struct rfkill *rfkill;
        struct rfkill_event ev;

        /* we don't need the 'hard' variable but accept it */
        if (count < RFKILL_EVENT_SIZE_V1 - 1)
                return -EINVAL;

        /*
         * Copy as much data as we can accept into our 'ev' buffer,
         * but tell userspace how much we've copied so it can determine
         * our API version even in a write() call, if it cares.
         */
        count = min(count, sizeof(ev));
        if (copy_from_user(&ev, buf, count))
                return -EFAULT;

        if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL)
                return -EINVAL;

        if (ev.type >= NUM_RFKILL_TYPES)
                return -EINVAL;

        mutex_lock(&rfkill_global_mutex);

        if (ev.op == RFKILL_OP_CHANGE_ALL) {
                if (ev.type == RFKILL_TYPE_ALL) {
                        enum rfkill_type i;
                        for (i = 0; i < NUM_RFKILL_TYPES; i++)
                                rfkill_global_states[i].cur = ev.soft;
                } else {
                        rfkill_global_states[ev.type].cur = ev.soft;
                }
        }

        list_for_each_entry(rfkill, &rfkill_list, node) {
                if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL)
                        continue;

                if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL)
                        continue;

                rfkill_set_block(rfkill, ev.soft);
        }
        mutex_unlock(&rfkill_global_mutex);

        return count;
}
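
/*
 * Added illustrative sketch (not part of the original file): a
 * userspace (not kernel) writer soft-blocking every WLAN radio with a
 * single RFKILL_OP_CHANGE_ALL event.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <linux/rfkill.h>

int block_all_wlan(int soft_block)
{
        struct rfkill_event ev;
        int fd = open("/dev/rfkill", O_RDWR);

        if (fd < 0)
                return -1;

        memset(&ev, 0, sizeof(ev));
        ev.op = RFKILL_OP_CHANGE_ALL;
        ev.type = RFKILL_TYPE_WLAN;
        ev.soft = soft_block;

        if (write(fd, &ev, sizeof(ev)) < 0) {
                close(fd);
                return -1;
        }

        close(fd);
        return 0;
}
#endif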

static int rfkill_fop_release(struct inode *inode, struct file *file)
{
        struct rfkill_data *data = file->private_data;
        struct rfkill_int_event *ev, *tmp;

        mutex_lock(&rfkill_global_mutex);
        list_del(&data->list);
        mutex_unlock(&rfkill_global_mutex);

        mutex_destroy(&data->mtx);
        list_for_each_entry_safe(ev, tmp, &data->events, list)
                kfree(ev);

#ifdef CONFIG_RFKILL_INPUT
        if (data->input_handler)
                if (atomic_dec_return(&rfkill_input_disabled) == 0)
                        printk(KERN_DEBUG "rfkill: input handler enabled\n");
#endif

        kfree(data);

        return 0;
}

#ifdef CONFIG_RFKILL_INPUT
static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
{
        struct rfkill_data *data = file->private_data;

        if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
                return -ENOSYS;

        if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
                return -ENOSYS;

        mutex_lock(&data->mtx);

        if (!data->input_handler) {
                if (atomic_inc_return(&rfkill_input_disabled) == 1)
                        printk(KERN_DEBUG "rfkill: input handler disabled\n");
                data->input_handler = true;
        }

        mutex_unlock(&data->mtx);

        return 0;
}
#endif

static const struct file_operations rfkill_fops = {
        .owner          = THIS_MODULE,
        .open           = rfkill_fop_open,
        .read           = rfkill_fop_read,
        .write          = rfkill_fop_write,
        .poll           = rfkill_fop_poll,
        .release        = rfkill_fop_release,
#ifdef CONFIG_RFKILL_INPUT
        .unlocked_ioctl = rfkill_fop_ioctl,
        .compat_ioctl   = rfkill_fop_ioctl,
#endif
        .llseek         = no_llseek,
};

static struct miscdevice rfkill_miscdev = {
        .name   = "rfkill",
        .fops   = &rfkill_fops,
        .minor  = MISC_DYNAMIC_MINOR,
};

static int __init rfkill_init(void)
{
        int error;
        int i;

        for (i = 0; i < NUM_RFKILL_TYPES; i++)
                rfkill_global_states[i].cur = !rfkill_default_state;

        error = class_register(&rfkill_class);
        if (error)
                goto out;

        error = misc_register(&rfkill_miscdev);
        if (error) {
                class_unregister(&rfkill_class);
                goto out;
        }

#ifdef CONFIG_RFKILL_INPUT
        error = rfkill_handler_init();
        if (error) {
                misc_deregister(&rfkill_miscdev);
                class_unregister(&rfkill_class);
                goto out;
        }
#endif

 out:
        return error;
}
subsys_initcall(rfkill_init);

static void __exit rfkill_exit(void)
{
#ifdef CONFIG_RFKILL_INPUT
        rfkill_handler_exit();
#endif
        misc_deregister(&rfkill_miscdev);
        class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);