/*
 * RTC subsystem, dev interface
 *
 * Copyright (C) 2005 Tower Technologies
 * Author: Alessandro Zummo <a.zummo@towertech.it>
 *
 * based on arch/arm/common/rtctime.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/sched/signal.h>
#include "rtc-core.h"

static dev_t rtc_devt;

#define RTC_DEV_MAX 16 /* 16 RTCs should be enough for everyone... */

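/*
 * Each /dev/rtcN node is exclusive-open: the RTC_DEV_BUSY bit is taken here
 * and released in rtc_dev_release(), so a second opener gets -EBUSY.
 */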
static int rtc_dev_open(struct inode *inode, struct file *file)
{
	struct rtc_device *rtc = container_of(inode->i_cdev,
					struct rtc_device, char_dev);

	if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags))
		return -EBUSY;

	file->private_data = rtc;

	spin_lock_irq(&rtc->irq_lock);
	rtc->irq_data = 0;
	spin_unlock_irq(&rtc->irq_lock);

	return 0;
}

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
/*
 * Routine to poll the RTC seconds field for change as often as possible,
 * after the first RTC_UIE use a timer to reduce polling
 */
static void rtc_uie_task(struct work_struct *work)
{
	struct rtc_device *rtc =
		container_of(work, struct rtc_device, uie_task);
	struct rtc_time tm;
	int num = 0;
	int err;

	err = rtc_read_time(rtc, &tm);

	spin_lock_irq(&rtc->irq_lock);
	if (rtc->stop_uie_polling || err) {
		rtc->uie_task_active = 0;
	} else if (rtc->oldsecs != tm.tm_sec) {
		num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
		rtc->oldsecs = tm.tm_sec;
		rtc->uie_timer.expires = jiffies + HZ - (HZ/10);
		rtc->uie_timer_active = 1;
		rtc->uie_task_active = 0;
		add_timer(&rtc->uie_timer);
	} else if (schedule_work(&rtc->uie_task) == 0) {
		rtc->uie_task_active = 0;
	}
	spin_unlock_irq(&rtc->irq_lock);
	if (num)
		rtc_handle_legacy_irq(rtc, num, RTC_UF);
}
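
/*
 * Timer callback for the UIE emulation: mark the timer idle and kick the
 * polling work again so the next seconds rollover is caught.
 */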
static void rtc_uie_timer(struct timer_list *t)
{
	struct rtc_device *rtc = from_timer(rtc, t, uie_timer);
	unsigned long flags;

	spin_lock_irqsave(&rtc->irq_lock, flags);
	rtc->uie_timer_active = 0;
	rtc->uie_task_active = 1;
	if (schedule_work(&rtc->uie_task) == 0)
		rtc->uie_task_active = 0;
	spin_unlock_irqrestore(&rtc->irq_lock, flags);
}

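/*
 * Disable UIE emulation: stop the polling loop, then drop the lock as needed
 * to cancel the timer and flush any queued work before marking UIE inactive.
 */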
static int clear_uie(struct rtc_device *rtc)
{
	spin_lock_irq(&rtc->irq_lock);
	if (rtc->uie_irq_active) {
		rtc->stop_uie_polling = 1;
		if (rtc->uie_timer_active) {
			spin_unlock_irq(&rtc->irq_lock);
			del_timer_sync(&rtc->uie_timer);
			spin_lock_irq(&rtc->irq_lock);
			rtc->uie_timer_active = 0;
		}
		if (rtc->uie_task_active) {
			spin_unlock_irq(&rtc->irq_lock);
			flush_scheduled_work();
			spin_lock_irq(&rtc->irq_lock);
		}
		rtc->uie_irq_active = 0;
	}
	spin_unlock_irq(&rtc->irq_lock);
	return 0;
}

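/*
 * Enable UIE emulation: record the current seconds value and start the
 * polling work that reports an RTC_UF event on each seconds change.
 */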
static int set_uie(struct rtc_device *rtc)
{
	struct rtc_time tm;
	int err;

	err = rtc_read_time(rtc, &tm);
	if (err)
		return err;
	spin_lock_irq(&rtc->irq_lock);
	if (!rtc->uie_irq_active) {
		rtc->uie_irq_active = 1;
		rtc->stop_uie_polling = 0;
		rtc->oldsecs = tm.tm_sec;
		rtc->uie_task_active = 1;
		if (schedule_work(&rtc->uie_task) == 0)
			rtc->uie_task_active = 0;
	}
	rtc->irq_data = 0;
	spin_unlock_irq(&rtc->irq_lock);
	return 0;
}

int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled)
{
	if (enabled)
		return set_uie(rtc);
	else
		return clear_uie(rtc);
}
EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul);

#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */

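/*
 * read() on /dev/rtcN blocks (unless O_NONBLOCK) until an RTC event is
 * reported, then returns the pending event bits and count packed into an
 * unsigned int or unsigned long, depending on the size the caller requested.
 */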
static ssize_t
rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct rtc_device *rtc = file->private_data;

	DECLARE_WAITQUEUE(wait, current);
	unsigned long data;
	ssize_t ret;

	if (count != sizeof(unsigned int) && count < sizeof(unsigned long))
		return -EINVAL;

	add_wait_queue(&rtc->irq_queue, &wait);
	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irq(&rtc->irq_lock);
		data = rtc->irq_data;
		rtc->irq_data = 0;
		spin_unlock_irq(&rtc->irq_lock);

		if (data != 0) {
			ret = 0;
			break;
		}
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	} while (1);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&rtc->irq_queue, &wait);

	if (ret == 0) {
		/* Check for any data updates */
		if (rtc->ops->read_callback)
			data = rtc->ops->read_callback(rtc->dev.parent,
						       data);

		if (sizeof(int) != sizeof(long) &&
		    count == sizeof(unsigned int))
			ret = put_user(data, (unsigned int __user *)buf) ?:
				sizeof(unsigned int);
		else
			ret = put_user(data, (unsigned long __user *)buf) ?:
				sizeof(unsigned long);
	}
	return ret;
}

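/* poll()/select() support: readable whenever an unread RTC event is pending. */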
static __poll_t rtc_dev_poll(struct file *file, poll_table *wait)
{
	struct rtc_device *rtc = file->private_data;
	unsigned long data;

	poll_wait(file, &rtc->irq_queue, wait);

	data = rtc->irq_data;

	return (data != 0) ? (EPOLLIN | EPOLLRDNORM) : 0;
}

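/*
 * Main ioctl entry point for /dev/rtcN: permission checks come first, then
 * the generic handlers below; anything unrecognised is passed through to the
 * driver's own ops->ioctl hook.
 */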
static long rtc_dev_ioctl(struct file *file,
		unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct rtc_device *rtc = file->private_data;
	const struct rtc_class_ops *ops = rtc->ops;
	struct rtc_time tm;
	struct rtc_wkalrm alarm;
	void __user *uarg = (void __user *) arg;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	/* check that the calling task has appropriate permissions
	 * for certain ioctls. doing this check here is useful
	 * to avoid duplicate code in each ioctl.
	 */
	switch (cmd) {
	case RTC_EPOCH_SET:
	case RTC_SET_TIME:
		if (!capable(CAP_SYS_TIME))
			err = -EACCES;
		break;

	case RTC_IRQP_SET:
		if (arg > rtc->max_user_freq && !capable(CAP_SYS_RESOURCE))
			err = -EACCES;
		break;

	case RTC_PIE_ON:
		if (rtc->irq_freq > rtc->max_user_freq &&
				!capable(CAP_SYS_RESOURCE))
			err = -EACCES;
		break;
	}

	if (err)
		goto done;

	/*
	 * Drivers *SHOULD NOT* provide ioctl implementations
	 * for these requests.  Instead, provide methods to
	 * support the following code, so that the RTC's main
	 * features are accessible without using ioctls.
	 *
	 * RTC and alarm times will be in UTC, by preference,
	 * but dual-booting with MS-Windows implies RTCs must
	 * use the local wall clock time.
	 */

	switch (cmd) {
	case RTC_ALM_READ:
		mutex_unlock(&rtc->ops_lock);

		err = rtc_read_alarm(rtc, &alarm);
		if (err < 0)
			return err;

		if (copy_to_user(uarg, &alarm.time, sizeof(tm)))
			err = -EFAULT;
		return err;

	case RTC_ALM_SET:
		mutex_unlock(&rtc->ops_lock);

		if (copy_from_user(&alarm.time, uarg, sizeof(tm)))
			return -EFAULT;

		alarm.enabled = 0;
		alarm.pending = 0;
		alarm.time.tm_wday = -1;
		alarm.time.tm_yday = -1;
		alarm.time.tm_isdst = -1;

		/* RTC_ALM_SET alarms may be up to 24 hours in the future.
		 * Rather than expecting every RTC to implement "don't care"
		 * for day/month/year fields, just force the alarm to have
		 * the right values for those fields.
		 *
		 * RTC_WKALM_SET should be used instead.  Not only does it
		 * eliminate the need for a separate RTC_AIE_ON call, it
		 * doesn't have the "alarm 23:59:59 in the future" race
		 * condition (and some applications would rather not waste
		 * time with the "don't care" bits).
		 */
		{
			time64_t now, then;

			err = rtc_read_time(rtc, &tm);
			if (err < 0)
				return err;
			now = rtc_tm_to_time64(&tm);

			alarm.time.tm_mday = tm.tm_mday;
			alarm.time.tm_mon = tm.tm_mon;
			alarm.time.tm_year = tm.tm_year;
			err = rtc_valid_tm(&alarm.time);
			if (err < 0)
				return err;
			then = rtc_tm_to_time64(&alarm.time);

			/* alarm may need to wrap into tomorrow */
			if (then < now) {
				rtc_time64_to_tm(now + 24 * 60 * 60, &tm);
				alarm.time.tm_mday = tm.tm_mday;
				alarm.time.tm_mon = tm.tm_mon;
				alarm.time.tm_year = tm.tm_year;
			}
		}

		return rtc_set_alarm(rtc, &alarm);

	case RTC_RD_TIME:
		mutex_unlock(&rtc->ops_lock);

		err = rtc_read_time(rtc, &tm);
		if (err < 0)
			return err;

		if (copy_to_user(uarg, &tm, sizeof(tm)))
			err = -EFAULT;
		return err;

	case RTC_SET_TIME:
		mutex_unlock(&rtc->ops_lock);

		if (copy_from_user(&tm, uarg, sizeof(tm)))
			return -EFAULT;

		return rtc_set_time(rtc, &tm);

	case RTC_PIE_ON:
		err = rtc_irq_set_state(rtc, NULL, 1);
		break;

	case RTC_PIE_OFF:
		err = rtc_irq_set_state(rtc, NULL, 0);
		break;

	case RTC_AIE_ON:
		mutex_unlock(&rtc->ops_lock);
		return rtc_alarm_irq_enable(rtc, 1);

	case RTC_AIE_OFF:
		mutex_unlock(&rtc->ops_lock);
		return rtc_alarm_irq_enable(rtc, 0);

	case RTC_UIE_ON:
		mutex_unlock(&rtc->ops_lock);
		return rtc_update_irq_enable(rtc, 1);

	case RTC_UIE_OFF:
		mutex_unlock(&rtc->ops_lock);
		return rtc_update_irq_enable(rtc, 0);

	case RTC_IRQP_SET:
		err = rtc_irq_set_freq(rtc, NULL, arg);
		break;
	case RTC_IRQP_READ:
		err = put_user(rtc->irq_freq, (unsigned long __user *)uarg);
		break;

	case RTC_WKALM_SET:
		mutex_unlock(&rtc->ops_lock);
		if (copy_from_user(&alarm, uarg, sizeof(alarm)))
			return -EFAULT;

		return rtc_set_alarm(rtc, &alarm);

	case RTC_WKALM_RD:
		mutex_unlock(&rtc->ops_lock);
		err = rtc_read_alarm(rtc, &alarm);
		if (err < 0)
			return err;

		if (copy_to_user(uarg, &alarm, sizeof(alarm)))
			err = -EFAULT;
		return err;

	default:
		/* Finally try the driver's ioctl interface */
		if (ops->ioctl) {
			err = ops->ioctl(rtc->dev.parent, cmd, arg);
			if (err == -ENOIOCTLCMD)
				err = -ENOTTY;
		} else
			err = -ENOTTY;
		break;
	}

done:
	mutex_unlock(&rtc->ops_lock);
	return err;
}

#ifdef CONFIG_COMPAT
#define RTC_IRQP_SET32		_IOW('p', 0x0c, __u32)
#define RTC_IRQP_READ32		_IOR('p', 0x0b, __u32)
#define RTC_EPOCH_SET32		_IOW('p', 0x0e, __u32)

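/*
 * 32-bit compat ioctl handler: RTC_IRQP_READ32 is answered directly, the
 * *_SET32 commands pass plain integers through, and everything else is
 * forwarded with the pointer argument converted via compat_ptr().
 */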
static long rtc_dev_compat_ioctl(struct file *file,
				 unsigned int cmd, unsigned long arg)
{
	struct rtc_device *rtc = file->private_data;
	void __user *uarg = compat_ptr(arg);

	switch (cmd) {
	case RTC_IRQP_READ32:
		return put_user(rtc->irq_freq, (__u32 __user *)uarg);

	case RTC_IRQP_SET32:
		/* arg is a plain integer, not pointer */
		return rtc_dev_ioctl(file, RTC_IRQP_SET, arg);

	case RTC_EPOCH_SET32:
		/* arg is a plain integer, not pointer */
		return rtc_dev_ioctl(file, RTC_EPOCH_SET, arg);
	}

	return rtc_dev_ioctl(file, cmd, (unsigned long)uarg);
}
#endif

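/* fasync() support so userspace can request SIGIO notification of RTC events. */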
static int rtc_dev_fasync(int fd, struct file *file, int on)
{
	struct rtc_device *rtc = file->private_data;

	return fasync_helper(fd, file, on, &rtc->async_queue);
}

static int rtc_dev_release(struct inode *inode, struct file *file)
{
	struct rtc_device *rtc = file->private_data;

	/* We shut down the repeating IRQs that userspace enabled,
	 * since nothing is listening to them.
	 *  - Update (UIE) ... currently only managed through ioctls
	 *  - Periodic (PIE) ... also used through rtc_*() interface calls
	 *
	 * Leave the alarm alone; it may be set to trigger a system wakeup
	 * later, or be used by kernel code, and is a one-shot event anyway.
	 */

	/* Keep ioctl until all drivers are converted */
	rtc_dev_ioctl(file, RTC_UIE_OFF, 0);
	rtc_update_irq_enable(rtc, 0);
	rtc_irq_set_state(rtc, NULL, 0);

	clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
	return 0;
}

static const struct file_operations rtc_dev_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= rtc_dev_read,
	.poll		= rtc_dev_poll,
	.unlocked_ioctl	= rtc_dev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= rtc_dev_compat_ioctl,
#endif
	.open		= rtc_dev_open,
	.release	= rtc_dev_release,
	.fasync		= rtc_dev_fasync,
};

/* insertion/removal hooks */

void rtc_dev_prepare(struct rtc_device *rtc)
{
	if (!rtc_devt)
		return;

	if (rtc->id >= RTC_DEV_MAX) {
		dev_dbg(&rtc->dev, "too many RTC devices\n");
		return;
	}

	rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id);

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
	INIT_WORK(&rtc->uie_task, rtc_uie_task);
	timer_setup(&rtc->uie_timer, rtc_uie_timer, 0);
#endif

	cdev_init(&rtc->char_dev, &rtc_dev_fops);
	rtc->char_dev.owner = rtc->owner;
}

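/* Reserve the character device region for up to RTC_DEV_MAX /dev/rtcN nodes. */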
void __init rtc_dev_init(void)
{
	int err;

	err = alloc_chrdev_region(&rtc_devt, 0, RTC_DEV_MAX, "rtc");
	if (err < 0)
		pr_err("failed to allocate char dev region\n");
}

void __exit rtc_dev_exit(void)
{
	if (rtc_devt)
		unregister_chrdev_region(rtc_devt, RTC_DEV_MAX);
}