/*
 * rfkill core: infrastructure for radio transmitter (RF kill) switches.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rfkill.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/fs.h>

#include "rfkill.h"

#define POLL_INTERVAL		(5 * HZ)

#define RFKILL_BLOCK_HW		BIT(0)
#define RFKILL_BLOCK_SW		BIT(1)
#define RFKILL_BLOCK_SW_PREV	BIT(2)
#define RFKILL_BLOCK_ANY	(RFKILL_BLOCK_HW |\
				 RFKILL_BLOCK_SW |\
				 RFKILL_BLOCK_SW_PREV)
#define RFKILL_BLOCK_SW_SETCALL	BIT(31)

struct rfkill {
	spinlock_t		lock;

	const char		*name;
	enum rfkill_type	type;

	unsigned long		state;

	u32			idx;

	bool			registered;
	bool			persistent;

	const struct rfkill_ops	*ops;
	void			*data;

#ifdef CONFIG_RFKILL_LEDS
	struct led_trigger	led_trigger;
	const char		*ledtrigname;
#endif

	struct device		dev;
	struct list_head	node;

	struct delayed_work	poll_work;
	struct work_struct	uevent_work;
	struct work_struct	sync_work;
};
#define to_rfkill(d)	container_of(d, struct rfkill, dev)

struct rfkill_int_event {
	struct list_head	list;
	struct rfkill_event	ev;
};

struct rfkill_data {
	struct list_head	list;
	struct list_head	events;
	struct mutex		mtx;
	wait_queue_head_t	read_wait;
	bool			input_handler;
};


MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_DESCRIPTION("RF switch support");
MODULE_LICENSE("GPL");

/*
 * rfkill_global_mutex serializes access to the rfkill device list, the
 * list of open /dev/rfkill file descriptors and the per-type global
 * states below.
 */
static LIST_HEAD(rfkill_list);		/* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static LIST_HEAD(rfkill_fds);		/* list of open fds of /dev/rfkill */

static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
		 "Default initial state for all radio types, 0 = radio off");

static struct {
	bool cur, sav;
} rfkill_global_states[NUM_RFKILL_TYPES];

static bool rfkill_epo_lock_active;

#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
	struct led_trigger *trigger;

	if (!rfkill->registered)
		return;

	trigger = &rfkill->led_trigger;

	if (rfkill->state & RFKILL_BLOCK_ANY)
		led_trigger_event(trigger, LED_OFF);
	else
		led_trigger_event(trigger, LED_FULL);
}

static void rfkill_led_trigger_activate(struct led_classdev *led)
{
	struct rfkill *rfkill;

	rfkill = container_of(led->trigger, struct rfkill, led_trigger);

	rfkill_led_trigger_event(rfkill);
}

const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
	return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);

void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
	BUG_ON(!rfkill);

	rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);
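
/*
 * Illustrative sketch (not part of the original file): a driver that wants
 * a radio LED to follow this rfkill instance would give the trigger a fixed
 * name before registration and point its LED at that trigger.  All names
 * and the example_dev structure are invented for the example.
 */
#if 0
static struct led_classdev example_wlan_led = {
	.name			= "example:green:wlan",
	.default_trigger	= "example-wlan-radio",	/* matches the name set below */
};

static void example_setup_led(struct rfkill *rfkill)
{
	/* must be called before rfkill_register() */
	rfkill_set_led_trigger_name(rfkill, "example-wlan-radio");
}
#endif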

static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	rfkill->led_trigger.name = rfkill->ledtrigname
					? : dev_name(&rfkill->dev);
	rfkill->led_trigger.activate = rfkill_led_trigger_activate;
	return led_trigger_register(&rfkill->led_trigger);
}

static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
	led_trigger_unregister(&rfkill->led_trigger);
}
#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}
#endif

static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
			      enum rfkill_operation op)
{
	unsigned long flags;

	ev->idx = rfkill->idx;
	ev->type = rfkill->type;
	ev->op = op;

	spin_lock_irqsave(&rfkill->lock, flags);
	ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
	ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
				       RFKILL_BLOCK_SW_PREV));
	spin_unlock_irqrestore(&rfkill->lock, flags);
}

static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
	struct rfkill_data *data;
	struct rfkill_int_event *ev;

	list_for_each_entry(data, &rfkill_fds, list) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			continue;
		rfkill_fill_event(&ev->ev, rfkill, op);
		mutex_lock(&data->mtx);
		list_add_tail(&ev->list, &data->events);
		mutex_unlock(&data->mtx);
		wake_up_interruptible(&data->read_wait);
	}
}

static void rfkill_event(struct rfkill *rfkill)
{
	if (!rfkill->registered)
		return;

	kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

	/* also send event to /dev/rfkill */
	rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}

static bool __rfkill_set_hw_state(struct rfkill *rfkill,
				  bool blocked, bool *change)
{
	unsigned long flags;
	bool prev, any;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_HW);
	if (blocked)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;
	*change = prev != blocked;
	any = rfkill->state & RFKILL_BLOCK_ANY;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);

	return any;
}

/**
 * rfkill_set_block - wrapper for the driver's set_block method
 * @rfkill: the rfkill struct to use
 * @blocked: the new software state
 *
 * Calls the set_block method (when applicable) and handles notifications
 * (uevents, /dev/rfkill events, LED trigger) as well.
 */
static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	int err;

	if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
		return;

	/*
	 * If the driver provides a query() op, give it a chance to bring
	 * the cached software state up to date before we modify it.
	 */
	if (rfkill->ops->query)
		rfkill->ops->query(rfkill, rfkill->data);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (rfkill->state & RFKILL_BLOCK_SW)
		rfkill->state |= RFKILL_BLOCK_SW_PREV;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= RFKILL_BLOCK_SW;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW;

	rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	err = rfkill->ops->set_block(rfkill->data, blocked);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (err) {
		/*
		 * The driver refused the change: restore the previous
		 * software state, which may have been updated by
		 * rfkill_set_sw_state() while set_block was in flight.
		 */
		if (rfkill->state & RFKILL_BLOCK_SW_PREV)
			rfkill->state |= RFKILL_BLOCK_SW;
		else
			rfkill->state &= ~RFKILL_BLOCK_SW;
	}
	rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
	rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);
	rfkill_event(rfkill);
}

#ifdef CONFIG_RFKILL_INPUT
static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);

/**
 * __rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of rfkill devices to be affected
 * @blocked: the new state
 *
 * This function sets the state of all switches of the given type.
 *
 * Caller must have acquired rfkill_global_mutex.
 */
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
	struct rfkill *rfkill;

	rfkill_global_states[type].cur = blocked;
	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->type != type)
			continue;

		rfkill_set_block(rfkill, blocked);
	}
}

/**
 * rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of rfkill devices to be affected
 * @blocked: the new state
 *
 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
 * Please refer to __rfkill_switch_all() for details.
 *
 * Does nothing if the EPO lock is active.
 */
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	if (!rfkill_epo_lock_active)
		__rfkill_switch_all(type, blocked);

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_epo - emergency power off all transmitters
 *
 * This soft-blocks all registered rfkill devices, remembers the previous
 * per-type global states and marks every type as blocked.  The EPO lock
 * stays active until rfkill_restore_states() or rfkill_remove_epo_lock()
 * is called.
 */
void rfkill_epo(void)
{
	struct rfkill *rfkill;
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = true;
	list_for_each_entry(rfkill, &rfkill_list, node)
		rfkill_set_block(rfkill, true);

	for (i = 0; i < NUM_RFKILL_TYPES; i++) {
		rfkill_global_states[i].sav = rfkill_global_states[i].cur;
		rfkill_global_states[i].cur = true;
	}

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_restore_states - restore global states
 *
 * Restore (and sync switches to) the per-type global states saved by
 * rfkill_epo(); this undoes the effects of rfkill_epo().
 */
void rfkill_restore_states(void)
{
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = false;
	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		__rfkill_switch_all(i, rfkill_global_states[i].sav);
	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_remove_epo_lock - unlock state changes
 *
 * Used by rfkill-input to manually unlock state changes when
 * the EPO switch is deactivated.
 */
void rfkill_remove_epo_lock(void)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);
	rfkill_epo_lock_active = false;
	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_is_epo_lock_active - returns true when the EPO lock is active
 *
 * Returns false if there is no active EPO condition, and true if an EPO
 * condition is active, which keeps all radios blocked.
 *
 * Can be called in atomic context.
 */
bool rfkill_is_epo_lock_active(void)
{
	return rfkill_epo_lock_active;
}

/**
 * rfkill_get_global_sw_state - returns global state for a type
 * @type: the type to get the global state of
 *
 * Returns the current global (software) block state for the given type.
 */
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
	return rfkill_global_states[type].cur;
}
#endif

bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
	bool ret, change;

	ret = __rfkill_set_hw_state(rfkill, blocked, &change);

	if (!rfkill->registered)
		return ret;

	if (change)
		schedule_work(&rfkill->uevent_work);

	return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state);
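
/*
 * Illustrative sketch (not from the original file): a driver would typically
 * call rfkill_set_hw_state() when it learns that a hardware kill switch
 * changed, e.g. from an interrupt or GPIO handler.  The example_dev structure
 * and the example_* helpers are invented for the example.
 */
#if 0
static void example_hw_switch_changed(struct example_dev *edev, bool killed)
{
	bool blocked;

	/*
	 * Tell the core about the new hardware state; the return value is
	 * the combined (hw || sw) blocked state the driver should honour.
	 */
	blocked = rfkill_set_hw_state(edev->rfkill, killed);

	if (blocked)
		example_disable_radio(edev);
}
#endif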

static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	u32 bit = RFKILL_BLOCK_SW;

	/* if in a ops->set_block right now, use other bit */
	if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
		bit = RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= bit;
	else
		rfkill->state &= ~bit;
}

bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, hwblock;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_SW);
	__rfkill_set_sw_state(rfkill, blocked);
	hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
	blocked = blocked || hwblock;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered)
		return blocked;

	if (prev != blocked && !hwblock)
		schedule_work(&rfkill->uevent_work);

	rfkill_led_trigger_event(rfkill);

	return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);
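
/*
 * Illustrative sketch (not from the original file): a driver whose firmware
 * can change the soft-block state on its own would report that back to the
 * core with rfkill_set_sw_state().  example_dev and the example_* helpers
 * are hypothetical names.
 */
#if 0
static void example_fw_event(struct example_dev *edev, bool radio_disabled)
{
	/* the return value is the combined blocked state after the update */
	if (rfkill_set_sw_state(edev->rfkill, radio_disabled))
		example_disable_radio(edev);
	else
		example_enable_radio(edev);
}
#endif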

void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;

	BUG_ON(!rfkill);
	BUG_ON(rfkill->registered);

	spin_lock_irqsave(&rfkill->lock, flags);
	__rfkill_set_sw_state(rfkill, blocked);
	rfkill->persistent = true;
	spin_unlock_irqrestore(&rfkill->lock, flags);
}
EXPORT_SYMBOL(rfkill_init_sw_state);

void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
	unsigned long flags;
	bool swprev, hwprev;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);

	/*
	 * No need to care about prev/setblock ... this is for uevent only
	 * and that will get triggered by rfkill_set_block anyway.
	 */
	swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
	hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
	__rfkill_set_sw_state(rfkill, sw);
	if (hw)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;

	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered) {
		rfkill->persistent = true;
	} else {
		if (swprev != sw || hwprev != hw)
			schedule_work(&rfkill->uevent_work);

		rfkill_led_trigger_event(rfkill);
	}
}
EXPORT_SYMBOL(rfkill_set_states);

static ssize_t rfkill_name_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill->name);
}

static const char *rfkill_get_type_str(enum rfkill_type type)
{
	switch (type) {
	case RFKILL_TYPE_WLAN:
		return "wlan";
	case RFKILL_TYPE_BLUETOOTH:
		return "bluetooth";
	case RFKILL_TYPE_UWB:
		return "ultrawideband";
	case RFKILL_TYPE_WIMAX:
		return "wimax";
	case RFKILL_TYPE_WWAN:
		return "wwan";
	case RFKILL_TYPE_GPS:
		return "gps";
	default:
		BUG();
	}

	BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_GPS + 1);
}

static ssize_t rfkill_type_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
}

static ssize_t rfkill_idx_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->idx);
}

static ssize_t rfkill_persistent_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->persistent);
}

static u8 user_state_from_blocked(unsigned long state)
{
	if (state & RFKILL_BLOCK_HW)
		return RFKILL_USER_STATE_HARD_BLOCKED;
	if (state & RFKILL_BLOCK_SW)
		return RFKILL_USER_STATE_SOFT_BLOCKED;

	return RFKILL_USER_STATE_UNBLOCKED;
}

static ssize_t rfkill_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long flags;
	u32 state;

	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	return sprintf(buf, "%d\n", user_state_from_blocked(state));
}

static ssize_t rfkill_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = strict_strtoul(buf, 0, &state);
	if (err)
		return err;

	if (state != RFKILL_USER_STATE_SOFT_BLOCKED &&
	    state != RFKILL_USER_STATE_UNBLOCKED)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);
	rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
	mutex_unlock(&rfkill_global_mutex);

	return err ?: count;
}

static ssize_t rfkill_claim_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return sprintf(buf, "%d\n", 0);
}

static ssize_t rfkill_claim_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	return -EOPNOTSUPP;
}

static struct device_attribute rfkill_dev_attrs[] = {
	__ATTR(name, S_IRUGO, rfkill_name_show, NULL),
	__ATTR(type, S_IRUGO, rfkill_type_show, NULL),
	__ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
	__ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL),
	__ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
	__ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
	__ATTR_NULL
};

static void rfkill_release(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);

	kfree(rfkill);
}

static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long flags;
	u32 state;
	int error;

	error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
	if (error)
		return error;
	error = add_uevent_var(env, "RFKILL_TYPE=%s",
			       rfkill_get_type_str(rfkill->type));
	if (error)
		return error;
	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);
	error = add_uevent_var(env, "RFKILL_STATE=%d",
			       user_state_from_blocked(state));
	return error;
}

void rfkill_pause_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);

void rfkill_resume_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	schedule_work(&rfkill->poll_work.work);
}
EXPORT_SYMBOL(rfkill_resume_polling);

static int rfkill_suspend(struct device *dev, pm_message_t state)
{
	struct rfkill *rfkill = to_rfkill(dev);

	rfkill_pause_polling(rfkill);

	return 0;
}

static int rfkill_resume(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);
	bool cur;

	if (!rfkill->persistent) {
		cur = !!(rfkill->state & RFKILL_BLOCK_SW);
		rfkill_set_block(rfkill, cur);
	}

	rfkill_resume_polling(rfkill);

	return 0;
}

static struct class rfkill_class = {
	.name		= "rfkill",
	.dev_release	= rfkill_release,
	.dev_attrs	= rfkill_dev_attrs,
	.dev_uevent	= rfkill_dev_uevent,
	.suspend	= rfkill_suspend,
	.resume		= rfkill_resume,
};

bool rfkill_blocked(struct rfkill *rfkill)
{
	unsigned long flags;
	u32 state;

	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	return !!(state & RFKILL_BLOCK_ANY);
}
EXPORT_SYMBOL(rfkill_blocked);

struct rfkill * __must_check rfkill_alloc(const char *name,
					  struct device *parent,
					  const enum rfkill_type type,
					  const struct rfkill_ops *ops,
					  void *ops_data)
{
	struct rfkill *rfkill;
	struct device *dev;

	if (WARN_ON(!ops))
		return NULL;

	if (WARN_ON(!ops->set_block))
		return NULL;

	if (WARN_ON(!name))
		return NULL;

	if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
		return NULL;

	rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
	if (!rfkill)
		return NULL;

	spin_lock_init(&rfkill->lock);
	INIT_LIST_HEAD(&rfkill->node);
	rfkill->type = type;
	rfkill->name = name;
	rfkill->ops = ops;
	rfkill->data = ops_data;

	dev = &rfkill->dev;
	dev->class = &rfkill_class;
	dev->parent = parent;
	device_initialize(dev);

	return rfkill;
}
EXPORT_SYMBOL(rfkill_alloc);
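
/*
 * Illustrative sketch (not from the original file): the minimal driver-side
 * setup for an rfkill instance is a set_block() implementation plus a call
 * to rfkill_alloc().  All example_* names are invented for the example.
 */
#if 0
static int example_set_block(void *data, bool blocked)
{
	struct example_dev *edev = data;

	/* turn the transmitter off when blocked, on otherwise */
	return example_radio_power(edev, !blocked);
}

static const struct rfkill_ops example_rfkill_ops = {
	.set_block	= example_set_block,
};

static int example_probe(struct example_dev *edev)
{
	edev->rfkill = rfkill_alloc("example-wlan", edev->dev,
				    RFKILL_TYPE_WLAN,
				    &example_rfkill_ops, edev);
	if (!edev->rfkill)
		return -ENOMEM;

	return 0;
}
#endif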

static void rfkill_poll(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, poll_work.work);

	/*
	 * Poll hardware state -- the driver will use one of the
	 * rfkill_set{,_hw,_sw}_state functions and use its
	 * return value to update the current status.
	 */
	rfkill->ops->poll(rfkill, rfkill->data);

	schedule_delayed_work(&rfkill->poll_work,
		round_jiffies_relative(POLL_INTERVAL));
}

static void rfkill_uevent_work(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, uevent_work);

	mutex_lock(&rfkill_global_mutex);
	rfkill_event(rfkill);
	mutex_unlock(&rfkill_global_mutex);
}

static void rfkill_sync_work(struct work_struct *work)
{
	struct rfkill *rfkill;
	bool cur;

	rfkill = container_of(work, struct rfkill, sync_work);

	mutex_lock(&rfkill_global_mutex);
	cur = rfkill_global_states[rfkill->type].cur;
	rfkill_set_block(rfkill, cur);
	mutex_unlock(&rfkill_global_mutex);
}

int __must_check rfkill_register(struct rfkill *rfkill)
{
	static unsigned long rfkill_no;
	struct device *dev = &rfkill->dev;
	int error;

	BUG_ON(!rfkill);

	mutex_lock(&rfkill_global_mutex);

	if (rfkill->registered) {
		error = -EALREADY;
		goto unlock;
	}

	rfkill->idx = rfkill_no;
	dev_set_name(dev, "rfkill%lu", rfkill_no);
	rfkill_no++;

	list_add_tail(&rfkill->node, &rfkill_list);

	error = device_add(dev);
	if (error)
		goto remove;

	error = rfkill_led_trigger_register(rfkill);
	if (error)
		goto devdel;

	rfkill->registered = true;

	INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
	INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
	INIT_WORK(&rfkill->sync_work, rfkill_sync_work);

	if (rfkill->ops->poll)
		schedule_delayed_work(&rfkill->poll_work,
			round_jiffies_relative(POLL_INTERVAL));

	if (!rfkill->persistent || rfkill_epo_lock_active) {
		schedule_work(&rfkill->sync_work);
	} else {
#ifdef CONFIG_RFKILL_INPUT
		bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);

		if (!atomic_read(&rfkill_input_disabled))
			__rfkill_switch_all(rfkill->type, soft_blocked);
#endif
	}

	rfkill_send_events(rfkill, RFKILL_OP_ADD);

	mutex_unlock(&rfkill_global_mutex);
	return 0;

 devdel:
	device_del(&rfkill->dev);
 remove:
	list_del_init(&rfkill->node);
 unlock:
	mutex_unlock(&rfkill_global_mutex);
	return error;
}
EXPORT_SYMBOL(rfkill_register);

void rfkill_unregister(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (rfkill->ops->poll)
		cancel_delayed_work_sync(&rfkill->poll_work);

	cancel_work_sync(&rfkill->uevent_work);
	cancel_work_sync(&rfkill->sync_work);

	rfkill->registered = false;

	device_del(&rfkill->dev);

	mutex_lock(&rfkill_global_mutex);
	rfkill_send_events(rfkill, RFKILL_OP_DEL);
	list_del_init(&rfkill->node);
	mutex_unlock(&rfkill_global_mutex);

	rfkill_led_trigger_unregister(rfkill);
}
EXPORT_SYMBOL(rfkill_unregister);

void rfkill_destroy(struct rfkill *rfkill)
{
	if (rfkill)
		put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_destroy);
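
/*
 * Illustrative sketch (not from the original file), continuing the
 * example_probe() sketch above: registration and teardown of an rfkill
 * instance.  If rfkill_register() fails, only rfkill_destroy() must be
 * called; after a successful registration the instance is torn down with
 * rfkill_unregister() followed by rfkill_destroy().
 */
#if 0
static int example_register_rfkill(struct example_dev *edev)
{
	int err;

	err = rfkill_register(edev->rfkill);
	if (err) {
		rfkill_destroy(edev->rfkill);
		edev->rfkill = NULL;
		return err;
	}

	return 0;
}

static void example_remove(struct example_dev *edev)
{
	rfkill_unregister(edev->rfkill);
	rfkill_destroy(edev->rfkill);
}
#endif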

static int rfkill_fop_open(struct inode *inode, struct file *file)
{
	struct rfkill_data *data;
	struct rfkill *rfkill;
	struct rfkill_int_event *ev, *tmp;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->events);
	mutex_init(&data->mtx);
	init_waitqueue_head(&data->read_wait);

	mutex_lock(&rfkill_global_mutex);
	mutex_lock(&data->mtx);
	/*
	 * start getting events from elsewhere but hold mtx to get
	 * startup events added first
	 */
	list_add(&data->list, &rfkill_fds);

	list_for_each_entry(rfkill, &rfkill_list, node) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			goto free;
		rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
		list_add_tail(&ev->list, &data->events);
	}
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);

	file->private_data = data;

	return nonseekable_open(inode, file);

 free:
	mutex_unlock(&data->mtx);
	/* unlink from rfkill_fds again before freeing the data below */
	list_del(&data->list);
	mutex_unlock(&rfkill_global_mutex);
	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);
	kfree(data);
	return -ENOMEM;
}

static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
{
	struct rfkill_data *data = file->private_data;
	unsigned int res = POLLOUT | POLLWRNORM;

	poll_wait(file, &data->read_wait, wait);

	mutex_lock(&data->mtx);
	if (!list_empty(&data->events))
		res = POLLIN | POLLRDNORM;
	mutex_unlock(&data->mtx);

	return res;
}

static bool rfkill_readable(struct rfkill_data *data)
{
	bool r;

	mutex_lock(&data->mtx);
	r = !list_empty(&data->events);
	mutex_unlock(&data->mtx);

	return r;
}

static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
			       size_t count, loff_t *pos)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev;
	unsigned long sz;
	int ret;

	mutex_lock(&data->mtx);

	while (list_empty(&data->events)) {
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&data->mtx);
		ret = wait_event_interruptible(data->read_wait,
					       rfkill_readable(data));
		mutex_lock(&data->mtx);

		if (ret)
			goto out;
	}

	ev = list_first_entry(&data->events, struct rfkill_int_event,
			      list);

	sz = min_t(unsigned long, sizeof(ev->ev), count);
	ret = sz;
	if (copy_to_user(buf, &ev->ev, sz))
		ret = -EFAULT;

	list_del(&ev->list);
	kfree(ev);
 out:
	mutex_unlock(&data->mtx);
	return ret;
}

static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
				size_t count, loff_t *pos)
{
	struct rfkill *rfkill;
	struct rfkill_event ev;

	/* we don't need the 'hard' variable but accept it */
	if (count < RFKILL_EVENT_SIZE_V1 - 1)
		return -EINVAL;

	/*
	 * Copy as much data as we can accept into our 'ev' buffer,
	 * but tell userspace how much we've copied so it can determine
	 * our API version even in a write() call, if it cares.
	 */
	count = min(count, sizeof(ev));
	if (copy_from_user(&ev, buf, count))
		return -EFAULT;

	if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL)
		return -EINVAL;

	if (ev.type >= NUM_RFKILL_TYPES)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);

	if (ev.op == RFKILL_OP_CHANGE_ALL) {
		if (ev.type == RFKILL_TYPE_ALL) {
			enum rfkill_type i;
			for (i = 0; i < NUM_RFKILL_TYPES; i++)
				rfkill_global_states[i].cur = ev.soft;
		} else {
			rfkill_global_states[ev.type].cur = ev.soft;
		}
	}

	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL)
			continue;

		if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL)
			continue;

		rfkill_set_block(rfkill, ev.soft);
	}
	mutex_unlock(&rfkill_global_mutex);

	return count;
}

static int rfkill_fop_release(struct inode *inode, struct file *file)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev, *tmp;

	mutex_lock(&rfkill_global_mutex);
	list_del(&data->list);
	mutex_unlock(&rfkill_global_mutex);

	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);

#ifdef CONFIG_RFKILL_INPUT
	if (data->input_handler)
		if (atomic_dec_return(&rfkill_input_disabled) == 0)
			printk(KERN_DEBUG "rfkill: input handler enabled\n");
#endif

	kfree(data);

	return 0;
}

#ifdef CONFIG_RFKILL_INPUT
static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct rfkill_data *data = file->private_data;

	if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
		return -ENOSYS;

	if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
		return -ENOSYS;

	mutex_lock(&data->mtx);

	if (!data->input_handler) {
		if (atomic_inc_return(&rfkill_input_disabled) == 1)
			printk(KERN_DEBUG "rfkill: input handler disabled\n");
		data->input_handler = true;
	}

	mutex_unlock(&data->mtx);

	return 0;
}
#endif

static const struct file_operations rfkill_fops = {
	.owner		= THIS_MODULE,
	.open		= rfkill_fop_open,
	.read		= rfkill_fop_read,
	.write		= rfkill_fop_write,
	.poll		= rfkill_fop_poll,
	.release	= rfkill_fop_release,
#ifdef CONFIG_RFKILL_INPUT
	.unlocked_ioctl	= rfkill_fop_ioctl,
	.compat_ioctl	= rfkill_fop_ioctl,
#endif
};

static struct miscdevice rfkill_miscdev = {
	.name	= "rfkill",
	.fops	= &rfkill_fops,
	.minor	= MISC_DYNAMIC_MINOR,
};
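
/*
 * Illustrative userspace sketch (not part of this file): how a program might
 * talk to the /dev/rfkill device implemented above.  It drains the initial
 * RFKILL_OP_ADD events and then soft-blocks all WLAN devices with a single
 * RFKILL_OP_CHANGE_ALL write.  It assumes struct rfkill_event and the
 * RFKILL_* constants exported by <linux/rfkill.h>.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/rfkill.h>

int main(void)
{
	struct rfkill_event ev;
	int fd = open("/dev/rfkill", O_RDWR | O_NONBLOCK);

	if (fd < 0)
		return 1;

	/* drain the startup events describing all registered switches */
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("idx %u type %u soft %u hard %u\n",
		       ev.idx, ev.type, ev.soft, ev.hard);

	/* soft-block every WLAN transmitter */
	memset(&ev, 0, sizeof(ev));
	ev.op = RFKILL_OP_CHANGE_ALL;
	ev.type = RFKILL_TYPE_WLAN;
	ev.soft = 1;
	if (write(fd, &ev, sizeof(ev)) < 0)
		perror("write");

	close(fd);
	return 0;
}
#endif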

static int __init rfkill_init(void)
{
	int error;
	int i;

	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		rfkill_global_states[i].cur = !rfkill_default_state;

	error = class_register(&rfkill_class);
	if (error)
		goto out;

	error = misc_register(&rfkill_miscdev);
	if (error) {
		class_unregister(&rfkill_class);
		goto out;
	}

#ifdef CONFIG_RFKILL_INPUT
	error = rfkill_handler_init();
	if (error) {
		misc_deregister(&rfkill_miscdev);
		class_unregister(&rfkill_class);
		goto out;
	}
#endif

 out:
	return error;
}
subsys_initcall(rfkill_init);

static void __exit rfkill_exit(void)
{
#ifdef CONFIG_RFKILL_INPUT
	rfkill_handler_exit();
#endif
	misc_deregister(&rfkill_miscdev);
	class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);