#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rfkill.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include "rfkill.h"

#define POLL_INTERVAL		(5 * HZ)

#define RFKILL_BLOCK_HW		BIT(0)
#define RFKILL_BLOCK_SW		BIT(1)
#define RFKILL_BLOCK_SW_PREV	BIT(2)
#define RFKILL_BLOCK_ANY	(RFKILL_BLOCK_HW |\
				 RFKILL_BLOCK_SW |\
				 RFKILL_BLOCK_SW_PREV)
#define RFKILL_BLOCK_SW_SETCALL	BIT(31)

struct rfkill {
	spinlock_t		lock;

	const char		*name;
	enum rfkill_type	type;

	unsigned long		state;

	u32			idx;

	bool			registered;
	bool			persistent;

	const struct rfkill_ops	*ops;
	void			*data;

#ifdef CONFIG_RFKILL_LEDS
	struct led_trigger	led_trigger;
	const char		*ledtrigname;
#endif

	struct device		dev;
	struct list_head	node;

	struct delayed_work	poll_work;
	struct work_struct	uevent_work;
	struct work_struct	sync_work;
};
#define to_rfkill(d)	container_of(d, struct rfkill, dev)

struct rfkill_int_event {
	struct list_head	list;
	struct rfkill_event	ev;
};

struct rfkill_data {
	struct list_head	list;
	struct list_head	events;
	struct mutex		mtx;
	wait_queue_head_t	read_wait;
	bool			input_handler;
};


MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_DESCRIPTION("RF switch support");
MODULE_LICENSE("GPL");

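/*
 * All registered rfkill devices live on rfkill_list, and every open
 * /dev/rfkill file descriptor is tracked on rfkill_fds.  Both lists and
 * the per-type global states below are serialized by rfkill_global_mutex.
 */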
static LIST_HEAD(rfkill_list);
static DEFINE_MUTEX(rfkill_global_mutex);
static LIST_HEAD(rfkill_fds);

static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
		 "Default initial state for all radio types, 0 = radio off");

static struct {
	bool cur, sav;
} rfkill_global_states[NUM_RFKILL_TYPES];

static bool rfkill_epo_lock_active;

#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
	struct led_trigger *trigger;

	if (!rfkill->registered)
		return;

	trigger = &rfkill->led_trigger;

	if (rfkill->state & RFKILL_BLOCK_ANY)
		led_trigger_event(trigger, LED_OFF);
	else
		led_trigger_event(trigger, LED_FULL);
}

static void rfkill_led_trigger_activate(struct led_classdev *led)
{
	struct rfkill *rfkill;

	rfkill = container_of(led->trigger, struct rfkill, led_trigger);

	rfkill_led_trigger_event(rfkill);
}

const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
	return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);

void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
	BUG_ON(!rfkill);

	rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);

static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	rfkill->led_trigger.name = rfkill->ledtrigname
					? : dev_name(&rfkill->dev);
	rfkill->led_trigger.activate = rfkill_led_trigger_activate;
	return led_trigger_register(&rfkill->led_trigger);
}

static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
	led_trigger_unregister(&rfkill->led_trigger);
}
#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}
#endif

static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
			      enum rfkill_operation op)
{
	unsigned long flags;

	ev->idx = rfkill->idx;
	ev->type = rfkill->type;
	ev->op = op;

	spin_lock_irqsave(&rfkill->lock, flags);
	ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
	ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
					RFKILL_BLOCK_SW_PREV));
	spin_unlock_irqrestore(&rfkill->lock, flags);
}

static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
	struct rfkill_data *data;
	struct rfkill_int_event *ev;

	list_for_each_entry(data, &rfkill_fds, list) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			continue;
		rfkill_fill_event(&ev->ev, rfkill, op);
		mutex_lock(&data->mtx);
		list_add_tail(&ev->list, &data->events);
		mutex_unlock(&data->mtx);
		wake_up_interruptible(&data->read_wait);
	}
}

static void rfkill_event(struct rfkill *rfkill)
{
	if (!rfkill->registered)
		return;

	kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

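	/* also queue the change for /dev/rfkill listeners */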
	rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}

static bool __rfkill_set_hw_state(struct rfkill *rfkill,
				  bool blocked, bool *change)
{
	unsigned long flags;
	bool prev, any;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_HW);
	if (blocked)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;
	*change = prev != blocked;
	any = !!(rfkill->state & RFKILL_BLOCK_ANY);
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);

	return any;
}

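/*
 * rfkill_set_block - update the software block state of one device
 * @rfkill: the device to update
 * @blocked: the new software block state
 *
 * Records the requested state, calls the driver's set_block() op and,
 * if the effective state changed, generates a change event.  Does
 * nothing while the device is suspended.
 */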
static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, curr;
	int err;

	if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
		return;

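	/* give the driver a chance to sync its current state first */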
	if (rfkill->ops->query)
		rfkill->ops->query(rfkill, rfkill->data);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = rfkill->state & RFKILL_BLOCK_SW;

	if (rfkill->state & RFKILL_BLOCK_SW)
		rfkill->state |= RFKILL_BLOCK_SW_PREV;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= RFKILL_BLOCK_SW;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW;

	rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	err = rfkill->ops->set_block(rfkill->data, blocked);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (err) {
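		/*
		 * The set_block() call failed -- restore the software
		 * state from RFKILL_BLOCK_SW_PREV, which may have been
		 * updated by rfkill_set_sw_state() while the call was in
		 * flight.
		 */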
		if (rfkill->state & RFKILL_BLOCK_SW_PREV)
			rfkill->state |= RFKILL_BLOCK_SW;
		else
			rfkill->state &= ~RFKILL_BLOCK_SW;
	}
	rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
	rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
	curr = rfkill->state & RFKILL_BLOCK_SW;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);

	if (prev != curr)
		rfkill_event(rfkill);
}

#ifdef CONFIG_RFKILL_INPUT
static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);

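/*
 * __rfkill_switch_all - toggle the state of all switches of a given type
 * @type: type of devices to be affected, or RFKILL_TYPE_ALL for all
 * @blocked: the new state
 *
 * Updates the global state for @type and applies it to every matching
 * registered device.  Caller must hold rfkill_global_mutex.
 */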
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
	struct rfkill *rfkill;

	rfkill_global_states[type].cur = blocked;
	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->type != type && type != RFKILL_TYPE_ALL)
			continue;

		rfkill_set_block(rfkill, blocked);
	}
}

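/*
 * rfkill_switch_all - toggle the state of all switches of a given type
 * @type: type of devices to be affected, or RFKILL_TYPE_ALL for all
 * @blocked: the new state
 *
 * Like __rfkill_switch_all(), but acquires rfkill_global_mutex itself
 * and does nothing while the EPO lock is active or the rfkill input
 * handler has been disabled.
 */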
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	if (!rfkill_epo_lock_active)
		__rfkill_switch_all(type, blocked);

	mutex_unlock(&rfkill_global_mutex);
}

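/*
 * rfkill_epo - emergency power off all transmitters
 *
 * Blocks every registered device, saves the current global state of each
 * type and marks all types blocked.  Also sets the EPO lock, which keeps
 * rfkill_switch_all() from changing states until it is released.
 */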
void rfkill_epo(void)
{
	struct rfkill *rfkill;
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = true;
	list_for_each_entry(rfkill, &rfkill_list, node)
		rfkill_set_block(rfkill, true);

	for (i = 0; i < NUM_RFKILL_TYPES; i++) {
		rfkill_global_states[i].sav = rfkill_global_states[i].cur;
		rfkill_global_states[i].cur = true;
	}

	mutex_unlock(&rfkill_global_mutex);
}

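/*
 * rfkill_restore_states - restore the global states saved by rfkill_epo()
 *
 * Releases the EPO lock and switches every type back to the state it had
 * before the emergency power off.
 */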
void rfkill_restore_states(void)
{
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = false;
	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		__rfkill_switch_all(i, rfkill_global_states[i].sav);
	mutex_unlock(&rfkill_global_mutex);
}

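/*
 * rfkill_remove_epo_lock - clear the EPO lock without restoring states
 *
 * Re-enables switch-all operations after an emergency power off while
 * leaving all devices in their current (blocked) state.
 */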
void rfkill_remove_epo_lock(void)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);
	rfkill_epo_lock_active = false;
	mutex_unlock(&rfkill_global_mutex);
}

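/*
 * rfkill_is_epo_lock_active - returns true if the EPO lock is active
 *
 * The lock is set by rfkill_epo() and cleared by rfkill_restore_states()
 * or rfkill_remove_epo_lock().
 */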
bool rfkill_is_epo_lock_active(void)
{
	return rfkill_epo_lock_active;
}

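/*
 * rfkill_get_global_sw_state - returns the global software block state
 * @type: the type to check
 */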
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
	return rfkill_global_states[type].cur;
}
#endif

bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
	bool ret, change;

	ret = __rfkill_set_hw_state(rfkill, blocked, &change);

	if (!rfkill->registered)
		return ret;

	if (change)
		schedule_work(&rfkill->uevent_work);

	return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state);

static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	u32 bit = RFKILL_BLOCK_SW;

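	/*
	 * If rfkill_set_block() is currently calling into the driver,
	 * update the "previous" state bit instead of the live one so the
	 * two updates cannot race.
	 */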
	if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
		bit = RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= bit;
	else
		rfkill->state &= ~bit;
}

bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, hwblock;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_SW);
	__rfkill_set_sw_state(rfkill, blocked);
	hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
	blocked = blocked || hwblock;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered)
		return blocked;

	if (prev != blocked && !hwblock)
		schedule_work(&rfkill->uevent_work);

	rfkill_led_trigger_event(rfkill);

	return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);

void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;

	BUG_ON(!rfkill);
	BUG_ON(rfkill->registered);

	spin_lock_irqsave(&rfkill->lock, flags);
	__rfkill_set_sw_state(rfkill, blocked);
	rfkill->persistent = true;
	spin_unlock_irqrestore(&rfkill->lock, flags);
}
EXPORT_SYMBOL(rfkill_init_sw_state);

void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
	unsigned long flags;
	bool swprev, hwprev;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);

	swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
	hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
	__rfkill_set_sw_state(rfkill, sw);
	if (hw)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;

	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered) {
		rfkill->persistent = true;
	} else {
		if (swprev != sw || hwprev != hw)
			schedule_work(&rfkill->uevent_work);

		rfkill_led_trigger_event(rfkill);
	}
}
EXPORT_SYMBOL(rfkill_set_states);

static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill->name);
}
static DEVICE_ATTR_RO(name);

static const char *rfkill_get_type_str(enum rfkill_type type)
{
	BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_NFC + 1);

	switch (type) {
	case RFKILL_TYPE_WLAN:
		return "wlan";
	case RFKILL_TYPE_BLUETOOTH:
		return "bluetooth";
	case RFKILL_TYPE_UWB:
		return "ultrawideband";
	case RFKILL_TYPE_WIMAX:
		return "wimax";
	case RFKILL_TYPE_WWAN:
		return "wwan";
	case RFKILL_TYPE_GPS:
		return "gps";
	case RFKILL_TYPE_FM:
		return "fm";
	case RFKILL_TYPE_NFC:
		return "nfc";
	default:
		BUG();
	}
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
}
static DEVICE_ATTR_RO(type);

static ssize_t index_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->idx);
}
static DEVICE_ATTR_RO(index);

static ssize_t persistent_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->persistent);
}
static DEVICE_ATTR_RO(persistent);

static ssize_t hard_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
}
static DEVICE_ATTR_RO(hard);

static ssize_t soft_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
}

static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = kstrtoul(buf, 0, &state);
	if (err)
		return err;

	if (state > 1)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);
	rfkill_set_block(rfkill, state);
	mutex_unlock(&rfkill_global_mutex);

	return count;
}
static DEVICE_ATTR_RW(soft);

static u8 user_state_from_blocked(unsigned long state)
{
	if (state & RFKILL_BLOCK_HW)
		return RFKILL_USER_STATE_HARD_BLOCKED;
	if (state & RFKILL_BLOCK_SW)
		return RFKILL_USER_STATE_SOFT_BLOCKED;

	return RFKILL_USER_STATE_UNBLOCKED;
}

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = kstrtoul(buf, 0, &state);
	if (err)
		return err;

	if (state != RFKILL_USER_STATE_SOFT_BLOCKED &&
	    state != RFKILL_USER_STATE_UNBLOCKED)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);
	rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
	mutex_unlock(&rfkill_global_mutex);

	return count;
}
static DEVICE_ATTR_RW(state);

static ssize_t claim_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", 0);
}
static DEVICE_ATTR_RO(claim);

static struct attribute *rfkill_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_type.attr,
	&dev_attr_index.attr,
	&dev_attr_persistent.attr,
	&dev_attr_state.attr,
	&dev_attr_claim.attr,
	&dev_attr_soft.attr,
	&dev_attr_hard.attr,
	NULL,
};
ATTRIBUTE_GROUPS(rfkill_dev);

static void rfkill_release(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);

	kfree(rfkill);
}

static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long flags;
	u32 state;
	int error;

	error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
	if (error)
		return error;
	error = add_uevent_var(env, "RFKILL_TYPE=%s",
			       rfkill_get_type_str(rfkill->type));
	if (error)
		return error;
	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);
	error = add_uevent_var(env, "RFKILL_STATE=%d",
			       user_state_from_blocked(state));
	return error;
}

void rfkill_pause_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);

void rfkill_resume_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	schedule_work(&rfkill->poll_work.work);
}
EXPORT_SYMBOL(rfkill_resume_polling);

static int rfkill_suspend(struct device *dev, pm_message_t state)
{
	struct rfkill *rfkill = to_rfkill(dev);

	rfkill_pause_polling(rfkill);

	return 0;
}

static int rfkill_resume(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);
	bool cur;

	if (!rfkill->persistent) {
		cur = !!(rfkill->state & RFKILL_BLOCK_SW);
		rfkill_set_block(rfkill, cur);
	}

	rfkill_resume_polling(rfkill);

	return 0;
}

static struct class rfkill_class = {
	.name		= "rfkill",
	.dev_release	= rfkill_release,
	.dev_groups	= rfkill_dev_groups,
	.dev_uevent	= rfkill_dev_uevent,
	.suspend	= rfkill_suspend,
	.resume		= rfkill_resume,
};

bool rfkill_blocked(struct rfkill *rfkill)
{
	unsigned long flags;
	u32 state;

	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	return !!(state & RFKILL_BLOCK_ANY);
}
EXPORT_SYMBOL(rfkill_blocked);

struct rfkill * __must_check rfkill_alloc(const char *name,
					  struct device *parent,
					  const enum rfkill_type type,
					  const struct rfkill_ops *ops,
					  void *ops_data)
{
	struct rfkill *rfkill;
	struct device *dev;

	if (WARN_ON(!ops))
		return NULL;

	if (WARN_ON(!ops->set_block))
		return NULL;

	if (WARN_ON(!name))
		return NULL;

	if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
		return NULL;

	rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
	if (!rfkill)
		return NULL;

	spin_lock_init(&rfkill->lock);
	INIT_LIST_HEAD(&rfkill->node);
	rfkill->type = type;
	rfkill->name = name;
	rfkill->ops = ops;
	rfkill->data = ops_data;

	dev = &rfkill->dev;
	dev->class = &rfkill_class;
	dev->parent = parent;
	device_initialize(dev);

	return rfkill;
}
EXPORT_SYMBOL(rfkill_alloc);

static void rfkill_poll(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, poll_work.work);

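	/*
	 * Poll the hardware state; the driver is expected to call one of
	 * the rfkill_set_{hw,sw}_state() helpers from its poll() op when
	 * it detects a change.
	 */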
	rfkill->ops->poll(rfkill, rfkill->data);

	schedule_delayed_work(&rfkill->poll_work,
			      round_jiffies_relative(POLL_INTERVAL));
}

static void rfkill_uevent_work(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, uevent_work);

	mutex_lock(&rfkill_global_mutex);
	rfkill_event(rfkill);
	mutex_unlock(&rfkill_global_mutex);
}

static void rfkill_sync_work(struct work_struct *work)
{
	struct rfkill *rfkill;
	bool cur;

	rfkill = container_of(work, struct rfkill, sync_work);

	mutex_lock(&rfkill_global_mutex);
	cur = rfkill_global_states[rfkill->type].cur;
	rfkill_set_block(rfkill, cur);
	mutex_unlock(&rfkill_global_mutex);
}

int __must_check rfkill_register(struct rfkill *rfkill)
{
	static unsigned long rfkill_no;
	struct device *dev = &rfkill->dev;
	int error;

	BUG_ON(!rfkill);

	mutex_lock(&rfkill_global_mutex);

	if (rfkill->registered) {
		error = -EALREADY;
		goto unlock;
	}

	rfkill->idx = rfkill_no;
	dev_set_name(dev, "rfkill%lu", rfkill_no);
	rfkill_no++;

	list_add_tail(&rfkill->node, &rfkill_list);

	error = device_add(dev);
	if (error)
		goto remove;

	error = rfkill_led_trigger_register(rfkill);
	if (error)
		goto devdel;

	rfkill->registered = true;

	INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
	INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
	INIT_WORK(&rfkill->sync_work, rfkill_sync_work);

	if (rfkill->ops->poll)
		schedule_delayed_work(&rfkill->poll_work,
				      round_jiffies_relative(POLL_INTERVAL));

	if (!rfkill->persistent || rfkill_epo_lock_active) {
		schedule_work(&rfkill->sync_work);
	} else {
#ifdef CONFIG_RFKILL_INPUT
		bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);

		if (!atomic_read(&rfkill_input_disabled))
			__rfkill_switch_all(rfkill->type, soft_blocked);
#endif
	}

	rfkill_send_events(rfkill, RFKILL_OP_ADD);

	mutex_unlock(&rfkill_global_mutex);
	return 0;

 devdel:
	device_del(&rfkill->dev);
 remove:
	list_del_init(&rfkill->node);
 unlock:
	mutex_unlock(&rfkill_global_mutex);
	return error;
}
EXPORT_SYMBOL(rfkill_register);

void rfkill_unregister(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (rfkill->ops->poll)
		cancel_delayed_work_sync(&rfkill->poll_work);

	cancel_work_sync(&rfkill->uevent_work);
	cancel_work_sync(&rfkill->sync_work);

	rfkill->registered = false;

	device_del(&rfkill->dev);

	mutex_lock(&rfkill_global_mutex);
	rfkill_send_events(rfkill, RFKILL_OP_DEL);
	list_del_init(&rfkill->node);
	mutex_unlock(&rfkill_global_mutex);

	rfkill_led_trigger_unregister(rfkill);
}
EXPORT_SYMBOL(rfkill_unregister);

void rfkill_destroy(struct rfkill *rfkill)
{
	if (rfkill)
		put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_destroy);

static int rfkill_fop_open(struct inode *inode, struct file *file)
{
	struct rfkill_data *data;
	struct rfkill *rfkill;
	struct rfkill_int_event *ev, *tmp;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->events);
	mutex_init(&data->mtx);
	init_waitqueue_head(&data->read_wait);

	mutex_lock(&rfkill_global_mutex);
	mutex_lock(&data->mtx);

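	/*
	 * Queue one RFKILL_OP_ADD event per registered device so the new
	 * reader learns the current state; both mutexes are held so no
	 * change event can slip in before this initial snapshot is done.
	 */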
	list_for_each_entry(rfkill, &rfkill_list, node) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			goto free;
		rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
		list_add_tail(&ev->list, &data->events);
	}
	list_add(&data->list, &rfkill_fds);
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);

	file->private_data = data;

	return nonseekable_open(inode, file);

 free:
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);
	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);
	kfree(data);
	return -ENOMEM;
}

static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
{
	struct rfkill_data *data = file->private_data;
	unsigned int res = POLLOUT | POLLWRNORM;

	poll_wait(file, &data->read_wait, wait);

	mutex_lock(&data->mtx);
	if (!list_empty(&data->events))
		res = POLLIN | POLLRDNORM;
	mutex_unlock(&data->mtx);

	return res;
}

static bool rfkill_readable(struct rfkill_data *data)
{
	bool r;

	mutex_lock(&data->mtx);
	r = !list_empty(&data->events);
	mutex_unlock(&data->mtx);

	return r;
}

static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
			       size_t count, loff_t *pos)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev;
	unsigned long sz;
	int ret;

	mutex_lock(&data->mtx);

	while (list_empty(&data->events)) {
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&data->mtx);
		ret = wait_event_interruptible(data->read_wait,
					       rfkill_readable(data));
		mutex_lock(&data->mtx);

		if (ret)
			goto out;
	}

	ev = list_first_entry(&data->events, struct rfkill_int_event,
			      list);

	sz = min_t(unsigned long, sizeof(ev->ev), count);
	ret = sz;
	if (copy_to_user(buf, &ev->ev, sz))
		ret = -EFAULT;

	list_del(&ev->list);
	kfree(ev);
 out:
	mutex_unlock(&data->mtx);
	return ret;
}

static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
				size_t count, loff_t *pos)
{
	struct rfkill *rfkill;
	struct rfkill_event ev;

	if (count < RFKILL_EVENT_SIZE_V1 - 1)
		return -EINVAL;

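	/*
	 * Copy no more than sizeof(ev) bytes and return the clamped count,
	 * so userspace can tell how much of its event was consumed.
	 */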
	count = min(count, sizeof(ev));
	if (copy_from_user(&ev, buf, count))
		return -EFAULT;

	if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL)
		return -EINVAL;

	if (ev.type >= NUM_RFKILL_TYPES)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);

	if (ev.op == RFKILL_OP_CHANGE_ALL) {
		if (ev.type == RFKILL_TYPE_ALL) {
			enum rfkill_type i;
			for (i = 0; i < NUM_RFKILL_TYPES; i++)
				rfkill_global_states[i].cur = ev.soft;
		} else {
			rfkill_global_states[ev.type].cur = ev.soft;
		}
	}

	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL)
			continue;

		if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL)
			continue;

		rfkill_set_block(rfkill, ev.soft);
	}
	mutex_unlock(&rfkill_global_mutex);

	return count;
}

static int rfkill_fop_release(struct inode *inode, struct file *file)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev, *tmp;

	mutex_lock(&rfkill_global_mutex);
	list_del(&data->list);
	mutex_unlock(&rfkill_global_mutex);

	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);

#ifdef CONFIG_RFKILL_INPUT
	if (data->input_handler)
		if (atomic_dec_return(&rfkill_input_disabled) == 0)
			printk(KERN_DEBUG "rfkill: input handler enabled\n");
#endif

	kfree(data);

	return 0;
}

#ifdef CONFIG_RFKILL_INPUT
static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct rfkill_data *data = file->private_data;

	if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
		return -ENOSYS;

	if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
		return -ENOSYS;

	mutex_lock(&data->mtx);

	if (!data->input_handler) {
		if (atomic_inc_return(&rfkill_input_disabled) == 1)
			printk(KERN_DEBUG "rfkill: input handler disabled\n");
		data->input_handler = true;
	}

	mutex_unlock(&data->mtx);

	return 0;
}
#endif

static const struct file_operations rfkill_fops = {
	.owner		= THIS_MODULE,
	.open		= rfkill_fop_open,
	.read		= rfkill_fop_read,
	.write		= rfkill_fop_write,
	.poll		= rfkill_fop_poll,
	.release	= rfkill_fop_release,
#ifdef CONFIG_RFKILL_INPUT
	.unlocked_ioctl	= rfkill_fop_ioctl,
	.compat_ioctl	= rfkill_fop_ioctl,
#endif
	.llseek		= no_llseek,
};

static struct miscdevice rfkill_miscdev = {
	.name	= "rfkill",
	.fops	= &rfkill_fops,
	.minor	= MISC_DYNAMIC_MINOR,
};

static int __init rfkill_init(void)
{
	int error;
	int i;

	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		rfkill_global_states[i].cur = !rfkill_default_state;

	error = class_register(&rfkill_class);
	if (error)
		goto out;

	error = misc_register(&rfkill_miscdev);
	if (error) {
		class_unregister(&rfkill_class);
		goto out;
	}

#ifdef CONFIG_RFKILL_INPUT
	error = rfkill_handler_init();
	if (error) {
		misc_deregister(&rfkill_miscdev);
		class_unregister(&rfkill_class);
		goto out;
	}
#endif

 out:
	return error;
}
subsys_initcall(rfkill_init);

static void __exit rfkill_exit(void)
{
#ifdef CONFIG_RFKILL_INPUT
	rfkill_handler_exit();
#endif
	misc_deregister(&rfkill_miscdev);
	class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);