linux/net/rfkill/core.c
/*
 * Copyright (C) 2006 - 2007 Ivo van Doorn
 * Copyright (C) 2007 Dmitry Torokhov
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rfkill.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include "rfkill.h"

#define POLL_INTERVAL           (5 * HZ)

#define RFKILL_BLOCK_HW         BIT(0)
#define RFKILL_BLOCK_SW         BIT(1)
#define RFKILL_BLOCK_SW_PREV    BIT(2)
#define RFKILL_BLOCK_ANY        (RFKILL_BLOCK_HW |\
                                 RFKILL_BLOCK_SW |\
                                 RFKILL_BLOCK_SW_PREV)
#define RFKILL_BLOCK_SW_SETCALL BIT(31)

struct rfkill {
        spinlock_t              lock;

        enum rfkill_type        type;

        unsigned long           state;

        u32                     idx;

        bool                    registered;
        bool                    persistent;
        bool                    polling_paused;
        bool                    suspended;

        const struct rfkill_ops *ops;
        void                    *data;

#ifdef CONFIG_RFKILL_LEDS
        struct led_trigger      led_trigger;
        const char              *ledtrigname;
#endif

        struct device           dev;
        struct list_head        node;

        struct delayed_work     poll_work;
        struct work_struct      uevent_work;
        struct work_struct      sync_work;
        char                    name[];
};
#define to_rfkill(d)    container_of(d, struct rfkill, dev)

struct rfkill_int_event {
        struct list_head        list;
        struct rfkill_event     ev;
};

struct rfkill_data {
        struct list_head        list;
        struct list_head        events;
        struct mutex            mtx;
        wait_queue_head_t       read_wait;
        bool                    input_handler;
};


MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_DESCRIPTION("RF switch support");
MODULE_LICENSE("GPL");


/*
 * The locking here should be made much smarter, we currently have
 * a bit of a stupid situation because drivers might want to register
 * the rfkill struct under their own lock, and take this lock during
 * rfkill method calls -- which will cause an AB-BA deadlock situation.
 *
 * To fix that, we need to rework this code here to be mostly lock-free
 * and only use the mutex for list manipulations, not to protect the
 * various other global variables. Then we can avoid holding the mutex
 * around driver operations, and all is happy.
 */
static LIST_HEAD(rfkill_list);  /* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static LIST_HEAD(rfkill_fds);   /* list of open fds of /dev/rfkill */

static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
                 "Default initial state for all radio types, 0 = radio off");

static struct {
        bool cur, sav;
} rfkill_global_states[NUM_RFKILL_TYPES];

static bool rfkill_epo_lock_active;


#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
        struct led_trigger *trigger;

        if (!rfkill->registered)
                return;

        trigger = &rfkill->led_trigger;

        if (rfkill->state & RFKILL_BLOCK_ANY)
                led_trigger_event(trigger, LED_OFF);
        else
                led_trigger_event(trigger, LED_FULL);
}

static void rfkill_led_trigger_activate(struct led_classdev *led)
{
        struct rfkill *rfkill;

        rfkill = container_of(led->trigger, struct rfkill, led_trigger);

        rfkill_led_trigger_event(rfkill);
}

const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
        return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);

void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
        BUG_ON(!rfkill);

        rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);

static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
        rfkill->led_trigger.name = rfkill->ledtrigname
                                        ? : dev_name(&rfkill->dev);
        rfkill->led_trigger.activate = rfkill_led_trigger_activate;
        return led_trigger_register(&rfkill->led_trigger);
}

static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
        led_trigger_unregister(&rfkill->led_trigger);
}
#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
        return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}
#endif /* CONFIG_RFKILL_LEDS */

static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
                              enum rfkill_operation op)
{
        unsigned long flags;

        ev->idx = rfkill->idx;
        ev->type = rfkill->type;
        ev->op = op;

        spin_lock_irqsave(&rfkill->lock, flags);
        ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
        ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
                                        RFKILL_BLOCK_SW_PREV));
        spin_unlock_irqrestore(&rfkill->lock, flags);
}

static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
        struct rfkill_data *data;
        struct rfkill_int_event *ev;

        list_for_each_entry(data, &rfkill_fds, list) {
                ev = kzalloc(sizeof(*ev), GFP_KERNEL);
                if (!ev)
                        continue;
                rfkill_fill_event(&ev->ev, rfkill, op);
                mutex_lock(&data->mtx);
                list_add_tail(&ev->list, &data->events);
                mutex_unlock(&data->mtx);
                wake_up_interruptible(&data->read_wait);
        }
}

static void rfkill_event(struct rfkill *rfkill)
{
        if (!rfkill->registered)
                return;

        kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

        /* also send event to /dev/rfkill */
        rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}

/**
 * rfkill_set_block - wrapper for set_block method
 *
 * @rfkill: the rfkill struct to use
 * @blocked: the new software state
 *
 * Calls the set_block method (when applicable) and handles notifications
 * etc. as well.
 */
static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
        unsigned long flags;
        bool prev, curr;
        int err;

        if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
                return;

        /*
         * Some platforms (...!) generate input events which affect the
         * _hard_ kill state -- whenever something tries to change the
         * current software state query the hardware state too.
         */
        if (rfkill->ops->query)
                rfkill->ops->query(rfkill, rfkill->data);

        spin_lock_irqsave(&rfkill->lock, flags);
        prev = rfkill->state & RFKILL_BLOCK_SW;

        if (prev)
                rfkill->state |= RFKILL_BLOCK_SW_PREV;
        else
                rfkill->state &= ~RFKILL_BLOCK_SW_PREV;

        if (blocked)
                rfkill->state |= RFKILL_BLOCK_SW;
        else
                rfkill->state &= ~RFKILL_BLOCK_SW;

        rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
        spin_unlock_irqrestore(&rfkill->lock, flags);

        err = rfkill->ops->set_block(rfkill->data, blocked);

        spin_lock_irqsave(&rfkill->lock, flags);
        if (err) {
                /*
                 * Failed -- reset status to _PREV, which may be different
                 * from what we have set _PREV to earlier in this function
                 * if rfkill_set_sw_state was invoked.
                 */
                if (rfkill->state & RFKILL_BLOCK_SW_PREV)
                        rfkill->state |= RFKILL_BLOCK_SW;
                else
                        rfkill->state &= ~RFKILL_BLOCK_SW;
        }
        rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
        rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
        curr = rfkill->state & RFKILL_BLOCK_SW;
        spin_unlock_irqrestore(&rfkill->lock, flags);

        rfkill_led_trigger_event(rfkill);

        if (prev != curr)
                rfkill_event(rfkill);
}

static void rfkill_update_global_state(enum rfkill_type type, bool blocked)
{
        int i;

        if (type != RFKILL_TYPE_ALL) {
                rfkill_global_states[type].cur = blocked;
                return;
        }

        for (i = 0; i < NUM_RFKILL_TYPES; i++)
                rfkill_global_states[i].cur = blocked;
}

#ifdef CONFIG_RFKILL_INPUT
static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);

/**
 * __rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * This function sets the state of all switches of given type,
 * unless a specific switch is suspended.
 *
 * Caller must have acquired rfkill_global_mutex.
 */
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
        struct rfkill *rfkill;

        rfkill_update_global_state(type, blocked);
        list_for_each_entry(rfkill, &rfkill_list, node) {
                if (rfkill->type != type && type != RFKILL_TYPE_ALL)
                        continue;

                rfkill_set_block(rfkill, blocked);
        }
}

/**
 * rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
 * Please refer to __rfkill_switch_all() for details.
 *
 * Does nothing if the EPO lock is active.
 */
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
        if (atomic_read(&rfkill_input_disabled))
                return;

        mutex_lock(&rfkill_global_mutex);

        if (!rfkill_epo_lock_active)
                __rfkill_switch_all(type, blocked);

        mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_epo - emergency power off all transmitters
 *
 * This kicks all non-suspended rfkill devices into the soft-blocked state,
 * ignoring everything in its path but rfkill_global_mutex and rfkill->lock.
 *
 * The global state before the EPO is saved and can be restored later
 * using rfkill_restore_states().
 */
void rfkill_epo(void)
{
        struct rfkill *rfkill;
        int i;

        if (atomic_read(&rfkill_input_disabled))
                return;

        mutex_lock(&rfkill_global_mutex);

        rfkill_epo_lock_active = true;
        list_for_each_entry(rfkill, &rfkill_list, node)
                rfkill_set_block(rfkill, true);

        for (i = 0; i < NUM_RFKILL_TYPES; i++) {
                rfkill_global_states[i].sav = rfkill_global_states[i].cur;
                rfkill_global_states[i].cur = true;
        }

        mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_restore_states - restore global states
 *
 * Restore (and sync switches to) the global state from the
 * states saved in rfkill_global_states.  This can undo the effects of
 * a call to rfkill_epo().
 */
void rfkill_restore_states(void)
{
        int i;

        if (atomic_read(&rfkill_input_disabled))
                return;

        mutex_lock(&rfkill_global_mutex);

        rfkill_epo_lock_active = false;
        for (i = 0; i < NUM_RFKILL_TYPES; i++)
                __rfkill_switch_all(i, rfkill_global_states[i].sav);
        mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_remove_epo_lock - unlock state changes
 *
 * Used by rfkill-input to manually unlock state changes, when
 * the EPO switch is deactivated.
 */
void rfkill_remove_epo_lock(void)
{
        if (atomic_read(&rfkill_input_disabled))
                return;

        mutex_lock(&rfkill_global_mutex);
        rfkill_epo_lock_active = false;
        mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_is_epo_lock_active - returns true if EPO is active
 *
 * Returns 0 (false) if there is NOT an active EPO condition,
 * and 1 (true) if there is an active EPO condition, which
 * locks all radios in one of the BLOCKED states.
 *
 * Can be called in atomic context.
 */
bool rfkill_is_epo_lock_active(void)
{
        return rfkill_epo_lock_active;
}

/**
 * rfkill_get_global_sw_state - returns global state for a type
 * @type: the type to get the global state of
 *
 * Returns the current global state for a given wireless
 * device type.
 */
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
        return rfkill_global_states[type].cur;
}
#endif

bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
        unsigned long flags;
        bool ret, prev;

        BUG_ON(!rfkill);

        spin_lock_irqsave(&rfkill->lock, flags);
        prev = !!(rfkill->state & RFKILL_BLOCK_HW);
        if (blocked)
                rfkill->state |= RFKILL_BLOCK_HW;
        else
                rfkill->state &= ~RFKILL_BLOCK_HW;
        ret = !!(rfkill->state & RFKILL_BLOCK_ANY);
        spin_unlock_irqrestore(&rfkill->lock, flags);

        rfkill_led_trigger_event(rfkill);

        if (!rfkill->registered)
                return ret;

        if (prev != blocked)
                schedule_work(&rfkill->uevent_work);

        return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state);
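
/*
 * Example usage (a minimal sketch, not part of the original file):
 * drivers typically call rfkill_set_hw_state() from whatever code
 * notices the hardware kill switch changing, e.g. an interrupt
 * handler -- note it only takes the spinlock and schedules work, so
 * atomic context is fine.  The return value is the combined HW+SW
 * block state.  All foo_* names below are hypothetical:
 *
 *      static irqreturn_t foo_killswitch_isr(int irq, void *dev_id)
 *      {
 *              struct foo *foo = dev_id;
 *
 *              if (rfkill_set_hw_state(foo->rfkill,
 *                                      foo_read_killswitch(foo)))
 *                      foo_radio_off(foo);
 *              return IRQ_HANDLED;
 *      }
 */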

static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
        u32 bit = RFKILL_BLOCK_SW;

        /* if in an ops->set_block call right now, use the other bit */
        if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
                bit = RFKILL_BLOCK_SW_PREV;

        if (blocked)
                rfkill->state |= bit;
        else
                rfkill->state &= ~bit;
}

bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
        unsigned long flags;
        bool prev, hwblock;

        BUG_ON(!rfkill);

        spin_lock_irqsave(&rfkill->lock, flags);
        prev = !!(rfkill->state & RFKILL_BLOCK_SW);
        __rfkill_set_sw_state(rfkill, blocked);
        hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
        blocked = blocked || hwblock;
        spin_unlock_irqrestore(&rfkill->lock, flags);

        if (!rfkill->registered)
                return blocked;

        if (prev != blocked && !hwblock)
                schedule_work(&rfkill->uevent_work);

        rfkill_led_trigger_event(rfkill);

        return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);
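
/*
 * Example usage (a minimal sketch, not part of the original file):
 * rfkill_set_sw_state() suits drivers whose device can change its own
 * soft-block state, e.g. via a firmware notification; the return value
 * is again the combined block state (foo_* names are hypothetical):
 *
 *      static void foo_fw_radio_event(struct foo *foo, bool radio_enabled)
 *      {
 *              if (rfkill_set_sw_state(foo->rfkill, !radio_enabled))
 *                      foo_radio_off(foo);
 *      }
 */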

void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
{
        unsigned long flags;

        BUG_ON(!rfkill);
        BUG_ON(rfkill->registered);

        spin_lock_irqsave(&rfkill->lock, flags);
        __rfkill_set_sw_state(rfkill, blocked);
        rfkill->persistent = true;
        spin_unlock_irqrestore(&rfkill->lock, flags);
}
EXPORT_SYMBOL(rfkill_init_sw_state);
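
/*
 * Example usage (a minimal sketch, not part of the original file):
 * a driver that can read a persisted radio state at probe time seeds
 * it between rfkill_alloc() and rfkill_register(); marking the device
 * persistent keeps rfkill_sync_work() from overwriting the state with
 * the global default (hypothetical names again):
 *
 *      rfkill_init_sw_state(foo->rfkill, foo_radio_is_soft_blocked(foo));
 *      err = rfkill_register(foo->rfkill);
 */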

void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
        unsigned long flags;
        bool swprev, hwprev;

        BUG_ON(!rfkill);

        spin_lock_irqsave(&rfkill->lock, flags);

        /*
         * No need to care about prev/setblock ... this is for uevent only
         * and that will get triggered by rfkill_set_block anyway.
         */
        swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
        hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
        __rfkill_set_sw_state(rfkill, sw);
        if (hw)
                rfkill->state |= RFKILL_BLOCK_HW;
        else
                rfkill->state &= ~RFKILL_BLOCK_HW;

        spin_unlock_irqrestore(&rfkill->lock, flags);

        if (!rfkill->registered) {
                rfkill->persistent = true;
        } else {
                if (swprev != sw || hwprev != hw)
                        schedule_work(&rfkill->uevent_work);

                rfkill_led_trigger_event(rfkill);
        }
}
EXPORT_SYMBOL(rfkill_set_states);

static const char * const rfkill_types[] = {
        NULL, /* RFKILL_TYPE_ALL */
        "wlan",
        "bluetooth",
        "ultrawideband",
        "wimax",
        "wwan",
        "gps",
        "fm",
        "nfc",
};

enum rfkill_type rfkill_find_type(const char *name)
{
        int i;

        BUILD_BUG_ON(ARRAY_SIZE(rfkill_types) != NUM_RFKILL_TYPES);

        if (!name)
                return RFKILL_TYPE_ALL;

        for (i = 1; i < NUM_RFKILL_TYPES; i++)
                if (!strcmp(name, rfkill_types[i]))
                        return i;
        return RFKILL_TYPE_ALL;
}
EXPORT_SYMBOL(rfkill_find_type);
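
/*
 * Example usage (a minimal sketch, not part of the original file):
 * a bus driver mapping a firmware-provided type string would do:
 *
 *      enum rfkill_type t = rfkill_find_type("bluetooth");
 *
 * which yields RFKILL_TYPE_BLUETOOTH, while NULL or an unknown string
 * falls back to RFKILL_TYPE_ALL.
 */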

static ssize_t name_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%s\n", rfkill->name);
}
static DEVICE_ATTR_RO(name);

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%s\n", rfkill_types[rfkill->type]);
}
static DEVICE_ATTR_RO(type);

static ssize_t index_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%d\n", rfkill->idx);
}
static DEVICE_ATTR_RO(index);

static ssize_t persistent_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%d\n", rfkill->persistent);
}
static DEVICE_ATTR_RO(persistent);

static ssize_t hard_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
}
static DEVICE_ATTR_RO(hard);

static ssize_t soft_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
}

static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
{
        struct rfkill *rfkill = to_rfkill(dev);
        unsigned long state;
        int err;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        err = kstrtoul(buf, 0, &state);
        if (err)
                return err;

        if (state > 1)
                return -EINVAL;

        mutex_lock(&rfkill_global_mutex);
        rfkill_set_block(rfkill, state);
        mutex_unlock(&rfkill_global_mutex);

        return count;
}
static DEVICE_ATTR_RW(soft);

static u8 user_state_from_blocked(unsigned long state)
{
        if (state & RFKILL_BLOCK_HW)
                return RFKILL_USER_STATE_HARD_BLOCKED;
        if (state & RFKILL_BLOCK_SW)
                return RFKILL_USER_STATE_SOFT_BLOCKED;

        return RFKILL_USER_STATE_UNBLOCKED;
}

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct rfkill *rfkill = to_rfkill(dev);

        return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
{
        struct rfkill *rfkill = to_rfkill(dev);
        unsigned long state;
        int err;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        err = kstrtoul(buf, 0, &state);
        if (err)
                return err;

        if (state != RFKILL_USER_STATE_SOFT_BLOCKED &&
            state != RFKILL_USER_STATE_UNBLOCKED)
                return -EINVAL;

        mutex_lock(&rfkill_global_mutex);
        rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
        mutex_unlock(&rfkill_global_mutex);

        return count;
}
static DEVICE_ATTR_RW(state);

static struct attribute *rfkill_dev_attrs[] = {
        &dev_attr_name.attr,
        &dev_attr_type.attr,
        &dev_attr_index.attr,
        &dev_attr_persistent.attr,
        &dev_attr_state.attr,
        &dev_attr_soft.attr,
        &dev_attr_hard.attr,
        NULL,
};
ATTRIBUTE_GROUPS(rfkill_dev);

static void rfkill_release(struct device *dev)
{
        struct rfkill *rfkill = to_rfkill(dev);

        kfree(rfkill);
}

static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct rfkill *rfkill = to_rfkill(dev);
        unsigned long flags;
        u32 state;
        int error;

        error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
        if (error)
                return error;
        error = add_uevent_var(env, "RFKILL_TYPE=%s",
                               rfkill_types[rfkill->type]);
        if (error)
                return error;
        spin_lock_irqsave(&rfkill->lock, flags);
        state = rfkill->state;
        spin_unlock_irqrestore(&rfkill->lock, flags);
        error = add_uevent_var(env, "RFKILL_STATE=%d",
                               user_state_from_blocked(state));
        return error;
}

void rfkill_pause_polling(struct rfkill *rfkill)
{
        BUG_ON(!rfkill);

        if (!rfkill->ops->poll)
                return;

        rfkill->polling_paused = true;
        cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);

void rfkill_resume_polling(struct rfkill *rfkill)
{
        BUG_ON(!rfkill);

        if (!rfkill->ops->poll)
                return;

        rfkill->polling_paused = false;

        if (rfkill->suspended)
                return;

        queue_delayed_work(system_power_efficient_wq,
                           &rfkill->poll_work, 0);
}
EXPORT_SYMBOL(rfkill_resume_polling);
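
/*
 * Example usage (a minimal sketch, not part of the original file):
 * a polled driver that must not have its poll() callback run while it
 * reconfigures the hardware can bracket the critical section
 * (foo_load_firmware() is hypothetical):
 *
 *      rfkill_pause_polling(foo->rfkill);
 *      err = foo_load_firmware(foo);
 *      rfkill_resume_polling(foo->rfkill);
 */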

#ifdef CONFIG_PM_SLEEP
static int rfkill_suspend(struct device *dev)
{
        struct rfkill *rfkill = to_rfkill(dev);

        rfkill->suspended = true;
        cancel_delayed_work_sync(&rfkill->poll_work);

        return 0;
}

static int rfkill_resume(struct device *dev)
{
        struct rfkill *rfkill = to_rfkill(dev);
        bool cur;

        rfkill->suspended = false;

        if (!rfkill->persistent) {
                cur = !!(rfkill->state & RFKILL_BLOCK_SW);
                rfkill_set_block(rfkill, cur);
        }

        if (rfkill->ops->poll && !rfkill->polling_paused)
                queue_delayed_work(system_power_efficient_wq,
                                   &rfkill->poll_work, 0);

        return 0;
}

static SIMPLE_DEV_PM_OPS(rfkill_pm_ops, rfkill_suspend, rfkill_resume);
#define RFKILL_PM_OPS (&rfkill_pm_ops)
#else
#define RFKILL_PM_OPS NULL
#endif

static struct class rfkill_class = {
        .name           = "rfkill",
        .dev_release    = rfkill_release,
        .dev_groups     = rfkill_dev_groups,
        .dev_uevent     = rfkill_dev_uevent,
        .pm             = RFKILL_PM_OPS,
};

bool rfkill_blocked(struct rfkill *rfkill)
{
        unsigned long flags;
        u32 state;

        spin_lock_irqsave(&rfkill->lock, flags);
        state = rfkill->state;
        spin_unlock_irqrestore(&rfkill->lock, flags);

        return !!(state & RFKILL_BLOCK_ANY);
}
EXPORT_SYMBOL(rfkill_blocked);


struct rfkill * __must_check rfkill_alloc(const char *name,
                                          struct device *parent,
                                          const enum rfkill_type type,
                                          const struct rfkill_ops *ops,
                                          void *ops_data)
{
        struct rfkill *rfkill;
        struct device *dev;

        if (WARN_ON(!ops))
                return NULL;

        if (WARN_ON(!ops->set_block))
                return NULL;

        if (WARN_ON(!name))
                return NULL;

        if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
                return NULL;

        rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL);
        if (!rfkill)
                return NULL;

        spin_lock_init(&rfkill->lock);
        INIT_LIST_HEAD(&rfkill->node);
        rfkill->type = type;
        strcpy(rfkill->name, name);
        rfkill->ops = ops;
        rfkill->data = ops_data;

        dev = &rfkill->dev;
        dev->class = &rfkill_class;
        dev->parent = parent;
        device_initialize(dev);

        return rfkill;
}
EXPORT_SYMBOL(rfkill_alloc);
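
/*
 * Example usage (a minimal sketch, not part of the original file):
 * the driver-side lifecycle around rfkill_alloc().  Only .set_block
 * is mandatory, as checked above; all foo_* names are hypothetical:
 *
 *      static int foo_rfkill_set_block(void *data, bool blocked)
 *      {
 *              struct foo *foo = data;
 *
 *              return foo_set_radio_enabled(foo, !blocked);
 *      }
 *
 *      static const struct rfkill_ops foo_rfkill_ops = {
 *              .set_block = foo_rfkill_set_block,
 *      };
 *
 *      foo->rfkill = rfkill_alloc("foo-wlan", &pdev->dev,
 *                                 RFKILL_TYPE_WLAN, &foo_rfkill_ops, foo);
 *      if (!foo->rfkill)
 *              return -ENOMEM;
 *
 *      err = rfkill_register(foo->rfkill);
 *      if (err) {
 *              rfkill_destroy(foo->rfkill);
 *              return err;
 *      }
 *
 * and on teardown, rfkill_unregister() followed by rfkill_destroy().
 */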

static void rfkill_poll(struct work_struct *work)
{
        struct rfkill *rfkill;

        rfkill = container_of(work, struct rfkill, poll_work.work);

        /*
         * Poll hardware state -- driver will use one of the
         * rfkill_set{,_hw,_sw}_state functions and use its
         * return value to update the current status.
         */
        rfkill->ops->poll(rfkill, rfkill->data);

        queue_delayed_work(system_power_efficient_wq,
                &rfkill->poll_work,
                round_jiffies_relative(POLL_INTERVAL));
}

static void rfkill_uevent_work(struct work_struct *work)
{
        struct rfkill *rfkill;

        rfkill = container_of(work, struct rfkill, uevent_work);

        mutex_lock(&rfkill_global_mutex);
        rfkill_event(rfkill);
        mutex_unlock(&rfkill_global_mutex);
}

static void rfkill_sync_work(struct work_struct *work)
{
        struct rfkill *rfkill;
        bool cur;

        rfkill = container_of(work, struct rfkill, sync_work);

        mutex_lock(&rfkill_global_mutex);
        cur = rfkill_global_states[rfkill->type].cur;
        rfkill_set_block(rfkill, cur);
        mutex_unlock(&rfkill_global_mutex);
}

int __must_check rfkill_register(struct rfkill *rfkill)
{
        static unsigned long rfkill_no;
        struct device *dev = &rfkill->dev;
        int error;

        BUG_ON(!rfkill);

        mutex_lock(&rfkill_global_mutex);

        if (rfkill->registered) {
                error = -EALREADY;
                goto unlock;
        }

        rfkill->idx = rfkill_no;
        dev_set_name(dev, "rfkill%lu", rfkill_no);
        rfkill_no++;

        list_add_tail(&rfkill->node, &rfkill_list);

        error = device_add(dev);
        if (error)
                goto remove;

        error = rfkill_led_trigger_register(rfkill);
        if (error)
                goto devdel;

        rfkill->registered = true;

        INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
        INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
        INIT_WORK(&rfkill->sync_work, rfkill_sync_work);

        if (rfkill->ops->poll)
                queue_delayed_work(system_power_efficient_wq,
                        &rfkill->poll_work,
                        round_jiffies_relative(POLL_INTERVAL));

        if (!rfkill->persistent || rfkill_epo_lock_active) {
                schedule_work(&rfkill->sync_work);
        } else {
#ifdef CONFIG_RFKILL_INPUT
                bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);

                if (!atomic_read(&rfkill_input_disabled))
                        __rfkill_switch_all(rfkill->type, soft_blocked);
#endif
        }

        rfkill_send_events(rfkill, RFKILL_OP_ADD);

        mutex_unlock(&rfkill_global_mutex);
        return 0;

 devdel:
        device_del(&rfkill->dev);
 remove:
        list_del_init(&rfkill->node);
 unlock:
        mutex_unlock(&rfkill_global_mutex);
        return error;
}
EXPORT_SYMBOL(rfkill_register);

void rfkill_unregister(struct rfkill *rfkill)
{
        BUG_ON(!rfkill);

        if (rfkill->ops->poll)
                cancel_delayed_work_sync(&rfkill->poll_work);

        cancel_work_sync(&rfkill->uevent_work);
        cancel_work_sync(&rfkill->sync_work);

        rfkill->registered = false;

        device_del(&rfkill->dev);

        mutex_lock(&rfkill_global_mutex);
        rfkill_send_events(rfkill, RFKILL_OP_DEL);
        list_del_init(&rfkill->node);
        mutex_unlock(&rfkill_global_mutex);

        rfkill_led_trigger_unregister(rfkill);
}
EXPORT_SYMBOL(rfkill_unregister);

void rfkill_destroy(struct rfkill *rfkill)
{
        if (rfkill)
                put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_destroy);

static int rfkill_fop_open(struct inode *inode, struct file *file)
{
        struct rfkill_data *data;
        struct rfkill *rfkill;
        struct rfkill_int_event *ev, *tmp;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        INIT_LIST_HEAD(&data->events);
        mutex_init(&data->mtx);
        init_waitqueue_head(&data->read_wait);

        mutex_lock(&rfkill_global_mutex);
        mutex_lock(&data->mtx);
        /*
         * start getting events from elsewhere but hold mtx to get
         * startup events added first
         */

        list_for_each_entry(rfkill, &rfkill_list, node) {
                ev = kzalloc(sizeof(*ev), GFP_KERNEL);
                if (!ev)
                        goto free;
                rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
                list_add_tail(&ev->list, &data->events);
        }
        list_add(&data->list, &rfkill_fds);
        mutex_unlock(&data->mtx);
        mutex_unlock(&rfkill_global_mutex);

        file->private_data = data;

        return nonseekable_open(inode, file);

 free:
        mutex_unlock(&data->mtx);
        mutex_unlock(&rfkill_global_mutex);
        mutex_destroy(&data->mtx);
        list_for_each_entry_safe(ev, tmp, &data->events, list)
                kfree(ev);
        kfree(data);
        return -ENOMEM;
}

static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
{
        struct rfkill_data *data = file->private_data;
        unsigned int res = POLLOUT | POLLWRNORM;

        poll_wait(file, &data->read_wait, wait);

        mutex_lock(&data->mtx);
        if (!list_empty(&data->events))
                res = POLLIN | POLLRDNORM;
        mutex_unlock(&data->mtx);

        return res;
}

static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
                               size_t count, loff_t *pos)
{
        struct rfkill_data *data = file->private_data;
        struct rfkill_int_event *ev;
        unsigned long sz;
        int ret;

        mutex_lock(&data->mtx);

        while (list_empty(&data->events)) {
                if (file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                        goto out;
                }
                mutex_unlock(&data->mtx);
                /* since we re-check and it just compares pointers,
                 * using !list_empty() without locking isn't a problem
                 */
                ret = wait_event_interruptible(data->read_wait,
                                               !list_empty(&data->events));
                mutex_lock(&data->mtx);

                if (ret)
                        goto out;
        }

        ev = list_first_entry(&data->events, struct rfkill_int_event,
                                list);

        sz = min_t(unsigned long, sizeof(ev->ev), count);
        ret = sz;
        if (copy_to_user(buf, &ev->ev, sz))
                ret = -EFAULT;

        list_del(&ev->list);
        kfree(ev);
 out:
        mutex_unlock(&data->mtx);
        return ret;
}

static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *pos)
{
        struct rfkill *rfkill;
        struct rfkill_event ev;

        /* we don't need the 'hard' variable but accept it */
        if (count < RFKILL_EVENT_SIZE_V1 - 1)
                return -EINVAL;

        /*
         * Copy as much data as we can accept into our 'ev' buffer,
         * but tell userspace how much we've copied so it can determine
         * our API version even in a write() call, if it cares.
         */
        count = min(count, sizeof(ev));
        if (copy_from_user(&ev, buf, count))
                return -EFAULT;

        if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL)
                return -EINVAL;

        if (ev.type >= NUM_RFKILL_TYPES)
                return -EINVAL;

        mutex_lock(&rfkill_global_mutex);

        if (ev.op == RFKILL_OP_CHANGE_ALL)
                rfkill_update_global_state(ev.type, ev.soft);

        list_for_each_entry(rfkill, &rfkill_list, node) {
                if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL)
                        continue;

                if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL)
                        continue;

                rfkill_set_block(rfkill, ev.soft);
        }
        mutex_unlock(&rfkill_global_mutex);

        return count;
}

static int rfkill_fop_release(struct inode *inode, struct file *file)
{
        struct rfkill_data *data = file->private_data;
        struct rfkill_int_event *ev, *tmp;

        mutex_lock(&rfkill_global_mutex);
        list_del(&data->list);
        mutex_unlock(&rfkill_global_mutex);

        mutex_destroy(&data->mtx);
        list_for_each_entry_safe(ev, tmp, &data->events, list)
                kfree(ev);

#ifdef CONFIG_RFKILL_INPUT
        if (data->input_handler)
                if (atomic_dec_return(&rfkill_input_disabled) == 0)
                        printk(KERN_DEBUG "rfkill: input handler enabled\n");
#endif

        kfree(data);

        return 0;
}

#ifdef CONFIG_RFKILL_INPUT
static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
{
        struct rfkill_data *data = file->private_data;

        if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
                return -ENOSYS;

        if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
                return -ENOSYS;

        mutex_lock(&data->mtx);

        if (!data->input_handler) {
                if (atomic_inc_return(&rfkill_input_disabled) == 1)
                        printk(KERN_DEBUG "rfkill: input handler disabled\n");
                data->input_handler = true;
        }

        mutex_unlock(&data->mtx);

        return 0;
}
#endif

static const struct file_operations rfkill_fops = {
        .owner          = THIS_MODULE,
        .open           = rfkill_fop_open,
        .read           = rfkill_fop_read,
        .write          = rfkill_fop_write,
        .poll           = rfkill_fop_poll,
        .release        = rfkill_fop_release,
#ifdef CONFIG_RFKILL_INPUT
        .unlocked_ioctl = rfkill_fop_ioctl,
        .compat_ioctl   = rfkill_fop_ioctl,
#endif
        .llseek         = no_llseek,
};
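
/*
 * Example usage (a minimal userspace sketch, not part of the original
 * file): opening /dev/rfkill queues one RFKILL_OP_ADD event per
 * existing rfkill device; each read() returns one struct rfkill_event,
 * and writing an RFKILL_OP_CHANGE_ALL event soft-blocks every device
 * of a type.  Error handling is omitted for brevity:
 *
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *      #include <string.h>
 *      #include <unistd.h>
 *      #include <linux/rfkill.h>
 *
 *      int fd = open("/dev/rfkill", O_RDWR | O_NONBLOCK);
 *      struct rfkill_event ev;
 *
 *      while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
 *              printf("idx %u type %u op %u soft %u hard %u\n",
 *                     ev.idx, ev.type, ev.op, ev.soft, ev.hard);
 *
 *      memset(&ev, 0, sizeof(ev));
 *      ev.op = RFKILL_OP_CHANGE_ALL;
 *      ev.type = RFKILL_TYPE_WLAN;
 *      ev.soft = 1;
 *      write(fd, &ev, sizeof(ev));
 */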

static struct miscdevice rfkill_miscdev = {
        .name   = "rfkill",
        .fops   = &rfkill_fops,
        .minor  = MISC_DYNAMIC_MINOR,
};

static int __init rfkill_init(void)
{
        int error;

        rfkill_update_global_state(RFKILL_TYPE_ALL, !rfkill_default_state);

        error = class_register(&rfkill_class);
        if (error)
                goto out;

        error = misc_register(&rfkill_miscdev);
        if (error) {
                class_unregister(&rfkill_class);
                goto out;
        }

#ifdef CONFIG_RFKILL_INPUT
        error = rfkill_handler_init();
        if (error) {
                misc_deregister(&rfkill_miscdev);
                class_unregister(&rfkill_class);
                goto out;
        }
#endif

 out:
        return error;
}
subsys_initcall(rfkill_init);

static void __exit rfkill_exit(void)
{
#ifdef CONFIG_RFKILL_INPUT
        rfkill_handler_exit();
#endif
        misc_deregister(&rfkill_miscdev);
        class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);