// SPDX-License-Identifier: GPL-2.0
/*
 * RTC subsystem, interface functions
 *
 * Copyright (C) 2005 Tower Technologies
 * Author: Alessandro Zummo <a.zummo@towertech.it>
 *
 * based on arch/arm/common/rtctime.c
 */

#include <linux/rtc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/log2.h>
#include <linux/workqueue.h>

#define CREATE_TRACE_POINTS
#include <trace/events/rtc.h>

static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);

static void rtc_add_offset(struct rtc_device *rtc, struct rtc_time *tm)
{
	time64_t secs;

	if (!rtc->offset_secs)
		return;

	secs = rtc_tm_to_time64(tm);

	/*
	 * Times read from the RTC hardware always fall inside the device's
	 * original valid range. If the value already lies in the region
	 * where the expanded range and the original range overlap, there is
	 * no need to add the offset.
	 */
	if ((rtc->start_secs > rtc->range_min && secs >= rtc->start_secs) ||
	    (rtc->start_secs < rtc->range_min &&
	     secs <= (rtc->start_secs + rtc->range_max - rtc->range_min)))
		return;

	rtc_time64_to_tm(secs + rtc->offset_secs, tm);
}

static void rtc_subtract_offset(struct rtc_device *rtc, struct rtc_time *tm)
{
	time64_t secs;

	if (!rtc->offset_secs)
		return;

	secs = rtc_tm_to_time64(tm);

	/*
	 * If the time being set already lies within the valid range of the
	 * RTC hardware, it can be written as-is. Otherwise subtract the
	 * offset to bring it back into the range the hardware can store.
	 */
	if (secs >= rtc->range_min && secs <= rtc->range_max)
		return;

	rtc_time64_to_tm(secs - rtc->offset_secs, tm);
}

static int rtc_valid_range(struct rtc_device *rtc, struct rtc_time *tm)
{
	if (rtc->range_min != rtc->range_max) {
		time64_t time = rtc_tm_to_time64(tm);
		time64_t range_min = rtc->set_start_time ? rtc->start_secs :
			rtc->range_min;
		timeu64_t range_max = rtc->set_start_time ?
			(rtc->start_secs + rtc->range_max - rtc->range_min) :
			rtc->range_max;

		if (time < range_min || time > range_max)
			return -ERANGE;
	}

	return 0;
}

static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
{
	int err;

	if (!rtc->ops) {
		err = -ENODEV;
	} else if (!rtc->ops->read_time) {
		err = -EINVAL;
	} else {
		memset(tm, 0, sizeof(struct rtc_time));
		err = rtc->ops->read_time(rtc->dev.parent, tm);
		if (err < 0) {
			dev_dbg(&rtc->dev, "read_time: fail to read: %d\n",
				err);
			return err;
		}

		rtc_add_offset(rtc, tm);

		err = rtc_valid_tm(tm);
		if (err < 0)
			dev_dbg(&rtc->dev, "read_time: rtc_time isn't valid\n");
	}
	return err;
}

int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	err = __rtc_read_time(rtc, tm);
	mutex_unlock(&rtc->ops_lock);

	trace_rtc_read_time(rtc_tm_to_time64(tm), err);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_read_time);
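
/*
 * Editor's usage sketch (not part of the original file): an in-kernel user
 * would typically look the device up by name and then call rtc_read_time().
 * The device name "rtc0" and the helper name are illustrative assumptions.
 */
#if 0	/* example only, not compiled */
static int example_read_rtc0(void)
{
	struct rtc_device *rtc = rtc_class_open("rtc0");
	struct rtc_time tm;
	int err;

	if (!rtc)
		return -ENODEV;

	err = rtc_read_time(rtc, &tm);
	if (!err)
		pr_info("rtc0 time: %ptR\n", &tm);

	rtc_class_close(rtc);
	return err;
}
#endif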

int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
{
	int err, uie;

	err = rtc_valid_tm(tm);
	if (err != 0)
		return err;

	err = rtc_valid_range(rtc, tm);
	if (err)
		return err;

	rtc_subtract_offset(rtc, tm);

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
	uie = rtc->uie_rtctimer.enabled || rtc->uie_irq_active;
#else
	uie = rtc->uie_rtctimer.enabled;
#endif
	if (uie) {
		err = rtc_update_irq_enable(rtc, 0);
		if (err)
			return err;
	}

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	if (!rtc->ops)
		err = -ENODEV;
	else if (rtc->ops->set_time)
		err = rtc->ops->set_time(rtc->dev.parent, tm);
	else
		err = -EINVAL;

	pm_stay_awake(rtc->dev.parent);
	mutex_unlock(&rtc->ops_lock);
	/* A timer might have just expired */
	schedule_work(&rtc->irqwork);

	if (uie) {
		err = rtc_update_irq_enable(rtc, 1);
		if (err)
			return err;
	}

	trace_rtc_set_time(rtc_tm_to_time64(tm), err);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_set_time);

static int rtc_read_alarm_internal(struct rtc_device *rtc,
				   struct rtc_wkalrm *alarm)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	if (!rtc->ops) {
		err = -ENODEV;
	} else if (!rtc->ops->read_alarm) {
		err = -EINVAL;
	} else {
		alarm->enabled = 0;
		alarm->pending = 0;
		alarm->time.tm_sec = -1;
		alarm->time.tm_min = -1;
		alarm->time.tm_hour = -1;
		alarm->time.tm_mday = -1;
		alarm->time.tm_mon = -1;
		alarm->time.tm_year = -1;
		alarm->time.tm_wday = -1;
		alarm->time.tm_yday = -1;
		alarm->time.tm_isdst = -1;
		err = rtc->ops->read_alarm(rtc->dev.parent, alarm);
	}

	mutex_unlock(&rtc->ops_lock);

	trace_rtc_read_alarm(rtc_tm_to_time64(&alarm->time), err);
	return err;
}

int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;
	struct rtc_time before, now;
	int first_time = 1;
	time64_t t_now, t_alm;
	enum { none, day, month, year } missing = none;
	unsigned int days;

	/*
	 * The lower level RTC driver may return -1 in some fields,
	 * creating invalid alarm->time values, for reasons like:
	 *
	 *   - the hardware may not be capable of filling them in;
	 *     many alarms match only on time-of-day fields, not
	 *     day/month/year calendar data.
	 *
	 *   - some hardware uses illegal values as "wildcard" match
	 *     values, which non-Linux firmware (like a BIOS) may try
	 *     to set up as e.g. "alarm 15 minutes after each hour".
	 *     Linux uses only oneshot alarms.
	 *
	 * When we see that here, we deal with it by using values from
	 * a current RTC timestamp for any missing (-1) values.
	 *
	 * This can be racy, because some fields of the RTC timestamp
	 * may have wrapped in the interval since we read the RTC alarm,
	 * which would lead to us inserting inconsistent values in place
	 * of the -1 fields. Reading the alarm and the timestamp in the
	 * reverse order has the same problem.
	 *
	 * So: read a first timestamp, then the alarm, then a second
	 * timestamp. If any field of the second timestamp differs from
	 * the first one, the snapshot may be inconsistent with the one
	 * used by rtc_read_alarm_internal(), and we simply loop and try
	 * again until we get a consistent set of values.
	 *
	 * This could all be done in the lower level drivers instead, but
	 * since more than one of them needs it, it is simpler to do it
	 * here.
	 */
	err = rtc_read_time(rtc, &before);
	if (err < 0)
		return err;
	do {
		if (!first_time)
			memcpy(&before, &now, sizeof(struct rtc_time));
		first_time = 0;

		/* get the RTC alarm values, which may be incomplete */
		err = rtc_read_alarm_internal(rtc, alarm);
		if (err)
			return err;

		/* full-function RTCs won't have such missing fields */
		if (rtc_valid_tm(&alarm->time) == 0) {
			rtc_add_offset(rtc, &alarm->time);
			return 0;
		}

		/* get the "after" timestamp, to detect wrapped fields */
		err = rtc_read_time(rtc, &now);
		if (err < 0)
			return err;

		/* note that tm_sec is a "don't care" value here: */
	} while (before.tm_min != now.tm_min ||
		 before.tm_hour != now.tm_hour ||
		 before.tm_mon != now.tm_mon ||
		 before.tm_year != now.tm_year);

	/*
	 * Fill in the missing alarm fields using the timestamp; we
	 * know there's at least one since alarm->time is invalid.
	 */
	if (alarm->time.tm_sec == -1)
		alarm->time.tm_sec = now.tm_sec;
	if (alarm->time.tm_min == -1)
		alarm->time.tm_min = now.tm_min;
	if (alarm->time.tm_hour == -1)
		alarm->time.tm_hour = now.tm_hour;

	/* for simplicity, only support date rollover for now */
	if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) {
		alarm->time.tm_mday = now.tm_mday;
		missing = day;
	}
	if ((unsigned int)alarm->time.tm_mon >= 12) {
		alarm->time.tm_mon = now.tm_mon;
		if (missing == none)
			missing = month;
	}
	if (alarm->time.tm_year == -1) {
		alarm->time.tm_year = now.tm_year;
		if (missing == none)
			missing = year;
	}

	/*
	 * The alarm must still be valid after filling in the missing
	 * fields; if it is not, give up.
	 */
	err = rtc_valid_tm(&alarm->time);
	if (err)
		goto done;

	/* with luck, no rollover is needed */
	t_now = rtc_tm_to_time64(&now);
	t_alm = rtc_tm_to_time64(&alarm->time);
	if (t_now < t_alm)
		goto done;

	switch (missing) {
	/*
	 * 24 hour rollover: the alarm matched a time-of-day that has
	 * already passed today, so it must be meant for tomorrow; just
	 * add one day.
	 */
	case day:
		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day");
		t_alm += 24 * 60 * 60;
		rtc_time64_to_tm(t_alm, &alarm->time);
		break;

	/*
	 * Month rollover: the matching day-of-month has already passed,
	 * so advance to the next month that actually contains that day
	 * (e.g. an alarm on the 31st skips the shorter months).
	 */
	case month:
		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month");
		do {
			if (alarm->time.tm_mon < 11) {
				alarm->time.tm_mon++;
			} else {
				alarm->time.tm_mon = 0;
				alarm->time.tm_year++;
			}
			days = rtc_month_days(alarm->time.tm_mon,
					      alarm->time.tm_year);
		} while (days < alarm->time.tm_mday);
		break;

	/* year rollover: easy, except for leap years (Feb 29th alarms) */
	case year:
		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
		do {
			alarm->time.tm_year++;
		} while (!is_leap_year(alarm->time.tm_year + 1900) &&
			 rtc_valid_tm(&alarm->time) != 0);
		break;

	default:
		dev_warn(&rtc->dev, "alarm rollover not handled\n");
	}

	err = rtc_valid_tm(&alarm->time);

done:
	if (err)
		dev_warn(&rtc->dev, "invalid alarm value: %ptR\n",
			 &alarm->time);

	return err;
}

int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;
	if (!rtc->ops) {
		err = -ENODEV;
	} else if (!rtc->ops->read_alarm) {
		err = -EINVAL;
	} else {
		memset(alarm, 0, sizeof(struct rtc_wkalrm));
		alarm->enabled = rtc->aie_timer.enabled;
		alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
	}
	mutex_unlock(&rtc->ops_lock);

	trace_rtc_read_alarm(rtc_tm_to_time64(&alarm->time), err);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_read_alarm);

static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	struct rtc_time tm;
	time64_t now, scheduled;
	int err;

	err = rtc_valid_tm(&alarm->time);
	if (err)
		return err;

	scheduled = rtc_tm_to_time64(&alarm->time);

	/* make sure we're not setting alarms in the past */
	err = __rtc_read_time(rtc, &tm);
	if (err)
		return err;
	now = rtc_tm_to_time64(&tm);
	if (scheduled <= now)
		return -ETIME;

	/*
	 * XXX - We just checked that the alarm time is not in the past, but
	 * there is still a race window: if the alarm is set for the next
	 * second and that second ticks over right here, before we program
	 * the hardware, the alarm will be missed.
	 */
	rtc_subtract_offset(rtc, &alarm->time);

	if (!rtc->ops)
		err = -ENODEV;
	else if (!rtc->ops->set_alarm)
		err = -EINVAL;
	else
		err = rtc->ops->set_alarm(rtc->dev.parent, alarm);

	trace_rtc_set_alarm(rtc_tm_to_time64(&alarm->time), err);
	return err;
}

int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;

	if (!rtc->ops)
		return -ENODEV;
	else if (!rtc->ops->set_alarm)
		return -EINVAL;

	err = rtc_valid_tm(&alarm->time);
	if (err != 0)
		return err;

	err = rtc_valid_range(rtc, &alarm->time);
	if (err)
		return err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;
	if (rtc->aie_timer.enabled)
		rtc_timer_remove(rtc, &rtc->aie_timer);

	rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
	rtc->aie_timer.period = 0;
	if (alarm->enabled)
		err = rtc_timer_enqueue(rtc, &rtc->aie_timer);

	mutex_unlock(&rtc->ops_lock);

	return err;
}
EXPORT_SYMBOL_GPL(rtc_set_alarm);
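
/*
 * Editor's usage sketch (not part of the original file): arm a one-shot
 * alarm one minute from now on an already opened rtc_device. The helper
 * name and the 60 second delay are illustrative assumptions.
 */
#if 0	/* example only, not compiled */
static int example_set_alarm_in_60s(struct rtc_device *rtc)
{
	struct rtc_wkalrm alrm = { .enabled = 1 };
	struct rtc_time tm;
	int err;

	err = rtc_read_time(rtc, &tm);
	if (err)
		return err;

	/* convert to seconds, add the delay, convert back */
	rtc_time64_to_tm(rtc_tm_to_time64(&tm) + 60, &alrm.time);

	return rtc_set_alarm(rtc, &alrm);
}
#endif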

/*
 * Called once per device at registration time, to pick up an alarm that
 * is already programmed in the hardware.
 */
int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;
	struct rtc_time now;

	err = rtc_valid_tm(&alarm->time);
	if (err != 0)
		return err;

	err = rtc_read_time(rtc, &now);
	if (err)
		return err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
	rtc->aie_timer.period = 0;

	/* Alarm has to be enabled & in the future for us to enqueue it */
	if (alarm->enabled && (rtc_tm_to_ktime(now) <
			       rtc->aie_timer.node.expires)) {
		rtc->aie_timer.enabled = 1;
		timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
		trace_rtc_timer_enqueue(&rtc->aie_timer);
	}
	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_initialize_alarm);

int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	if (rtc->aie_timer.enabled != enabled) {
		if (enabled)
			err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
		else
			rtc_timer_remove(rtc, &rtc->aie_timer);
	}

	if (err)
		/* nothing to do: pass the rtc_timer_enqueue() error along */;
	else if (!rtc->ops)
		err = -ENODEV;
	else if (!rtc->ops->alarm_irq_enable)
		err = -EINVAL;
	else
		err = rtc->ops->alarm_irq_enable(rtc->dev.parent, enabled);

	mutex_unlock(&rtc->ops_lock);

	trace_rtc_alarm_irq_enable(enabled, err);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_alarm_irq_enable);

int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
	int rc = 0, err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
	if (enabled == 0 && rtc->uie_irq_active) {
		mutex_unlock(&rtc->ops_lock);
		return rtc_dev_update_irq_enable_emul(rtc, 0);
	}
#endif

	if (rtc->uie_rtctimer.enabled == enabled)
		goto out;

	if (rtc->uie_unsupported) {
		err = -EINVAL;
		goto out;
	}

	if (enabled) {
		struct rtc_time tm;
		ktime_t now, onesec;

		rc = __rtc_read_time(rtc, &tm);
		if (rc)
			goto out;
		onesec = ktime_set(1, 0);
		now = rtc_tm_to_ktime(tm);
		rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
		rtc->uie_rtctimer.period = ktime_set(1, 0);
		err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
	} else {
		rtc_timer_remove(rtc, &rtc->uie_rtctimer);
	}

out:
	mutex_unlock(&rtc->ops_lock);

	/*
	 * __rtc_read_time() is also protected by ops_lock; report its error
	 * as soon as we have dropped the mutex, instead of falling through
	 * to the UIE mode emulation below.
	 */
	if (rc)
		return rc;

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
	/*
	 * Enable emulation if the driver returned -EINVAL to signal that it
	 * has been configured without interrupts or that they are not
	 * available at the moment.
	 */
	if (err == -EINVAL)
		err = rtc_dev_update_irq_enable_emul(rtc, enabled);
#endif
	return err;
}
EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
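
/*
 * Editor's usage sketch (not part of the original file): turn 1 Hz update
 * interrupts on and back off again for an opened rtc_device. The helper
 * name is an illustrative assumption.
 */
#if 0	/* example only, not compiled */
static int example_pulse_uie(struct rtc_device *rtc)
{
	int err;

	err = rtc_update_irq_enable(rtc, 1);	/* start the 1 Hz UIE timer */
	if (err)
		return err;

	/* ... consume update events, e.g. via the /dev/rtcN char device ... */

	return rtc_update_irq_enable(rtc, 0);	/* stop it again */
}
#endif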

/**
 * rtc_handle_legacy_irq - AIE, UIE and PIE event hook
 * @rtc: pointer to the rtc device
 * @num: number of IRQs being reported (usually one)
 * @mode: type of the event: RTC_AF, RTC_UF or RTC_PF
 *
 * Called when an AIE, UIE or PIE interrupt has occurred (or been emulated)
 * to update the legacy irq_data word and wake up any readers.
 */
void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
{
	unsigned long flags;

	/* mark one irq of the appropriate mode */
	spin_lock_irqsave(&rtc->irq_lock, flags);
	rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF | mode);
	spin_unlock_irqrestore(&rtc->irq_lock, flags);

	wake_up_interruptible(&rtc->irq_queue);
	kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
}

/**
 * rtc_aie_update_irq - alarm irq event hook
 * @rtc: pointer to the rtc device
 *
 * Called when an alarm interrupt occurs; forwards it as a legacy
 * RTC_AF event.
 */
void rtc_aie_update_irq(struct rtc_device *rtc)
{
	rtc_handle_legacy_irq(rtc, 1, RTC_AF);
}

/**
 * rtc_uie_update_irq - update irq event hook
 * @rtc: pointer to the rtc device
 *
 * Called when an update (1 Hz) interrupt occurs; forwards it as a legacy
 * RTC_UF event.
 */
void rtc_uie_update_irq(struct rtc_device *rtc)
{
	rtc_handle_legacy_irq(rtc, 1, RTC_UF);
}

/**
 * rtc_pie_update_irq - periodic irq hrtimer hook
 * @timer: pointer to the PIE mode hrtimer
 *
 * Emulates PIE mode interrupts using an hrtimer: called every time the
 * periodic hrtimer expires, it forwards the elapsed periods as legacy
 * RTC_PF events and restarts the timer.
 */
enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
{
	struct rtc_device *rtc;
	ktime_t period;
	u64 count;

	rtc = container_of(timer, struct rtc_device, pie_timer);

	period = NSEC_PER_SEC / rtc->irq_freq;
	count = hrtimer_forward_now(timer, period);

	rtc_handle_legacy_irq(rtc, count, RTC_PF);

	return HRTIMER_RESTART;
}

/**
 * rtc_update_irq - Triggered when an RTC interrupt occurs
 * @rtc: the rtc device
 * @num: how many irqs are being reported (usually one)
 * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
 *
 * Drivers call this from their interrupt handler; the actual event
 * processing is deferred to rtc_timer_do_work().
 */
void rtc_update_irq(struct rtc_device *rtc,
		    unsigned long num, unsigned long events)
{
	if (IS_ERR_OR_NULL(rtc))
		return;

	pm_stay_awake(rtc->dev.parent);
	schedule_work(&rtc->irqwork);
}
EXPORT_SYMBOL_GPL(rtc_update_irq);

struct rtc_device *rtc_class_open(const char *name)
{
	struct device *dev;
	struct rtc_device *rtc = NULL;

	dev = class_find_device_by_name(rtc_class, name);
	if (dev)
		rtc = to_rtc_device(dev);

	if (rtc) {
		if (!try_module_get(rtc->owner)) {
			put_device(dev);
			rtc = NULL;
		}
	}

	return rtc;
}
EXPORT_SYMBOL_GPL(rtc_class_open);

void rtc_class_close(struct rtc_device *rtc)
{
	module_put(rtc->owner);
	put_device(&rtc->dev);
}
EXPORT_SYMBOL_GPL(rtc_class_close);

static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled)
{
	/*
	 * Always try to cancel the timer first. hrtimer_cancel() cannot be
	 * used here because it could block on a running callback; instead,
	 * if hrtimer_try_to_cancel() reports that the callback is currently
	 * running, back out and let the caller retry.
	 */
	if (hrtimer_try_to_cancel(&rtc->pie_timer) < 0)
		return -1;

	if (enabled) {
		ktime_t period = NSEC_PER_SEC / rtc->irq_freq;

		hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
	}
	return 0;
}

/**
 * rtc_irq_set_state - enable/disable periodic IRQs
 * @rtc: the rtc device
 * @enabled: true to enable periodic IRQs
 * Context: any
 *
 * Note that rtc_irq_set_freq() should previously have been used to
 * specify the desired frequency of the periodic IRQ.
 */
int rtc_irq_set_state(struct rtc_device *rtc, int enabled)
{
	int err = 0;

	while (rtc_update_hrtimer(rtc, enabled) < 0)
		cpu_relax();

	rtc->pie_enabled = enabled;

	trace_rtc_irq_set_state(enabled, err);
	return err;
}

/**
 * rtc_irq_set_freq - set the periodic IRQ frequency
 * @rtc: the rtc device
 * @freq: positive frequency, at most RTC_MAX_FREQ
 * Context: any
 *
 * Note that rtc_irq_set_state() is used to enable or disable the
 * periodic IRQs.
 */
int rtc_irq_set_freq(struct rtc_device *rtc, int freq)
{
	int err = 0;

	if (freq <= 0 || freq > RTC_MAX_FREQ)
		return -EINVAL;

	rtc->irq_freq = freq;
	while (rtc->pie_enabled && rtc_update_hrtimer(rtc, 1) < 0)
		cpu_relax();

	trace_rtc_irq_set_freq(freq, err);
	return err;
}

/**
 * rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue
 * @rtc: rtc device
 * @timer: timer being added
 *
 * Enqueues a timer onto the rtc device's timerqueue and sets
 * the next alarm event appropriately.
 *
 * Sets the enabled bit on the added timer.
 *
 * Must hold ops_lock for proper serialization of the timerqueue.
 */
static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
{
	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
	struct rtc_time tm;
	ktime_t now;

	timer->enabled = 1;
	__rtc_read_time(rtc, &tm);
	now = rtc_tm_to_ktime(tm);

	/* Skip over expired timers when looking for the next event */
	while (next) {
		if (next->expires >= now)
			break;
		next = timerqueue_iterate_next(next);
	}

	timerqueue_add(&rtc->timerqueue, &timer->node);
	trace_rtc_timer_enqueue(timer);
	if (!next || ktime_before(timer->node.expires, next->expires)) {
		struct rtc_wkalrm alarm;
		int err;

		alarm.time = rtc_ktime_to_tm(timer->node.expires);
		alarm.enabled = 1;
		err = __rtc_set_alarm(rtc, &alarm);
		if (err == -ETIME) {
			pm_stay_awake(rtc->dev.parent);
			schedule_work(&rtc->irqwork);
		} else if (err) {
			timerqueue_del(&rtc->timerqueue, &timer->node);
			trace_rtc_timer_dequeue(timer);
			timer->enabled = 0;
			return err;
		}
	}
	return 0;
}

static void rtc_alarm_disable(struct rtc_device *rtc)
{
	if (!rtc->ops || !rtc->ops->alarm_irq_enable)
		return;

	rtc->ops->alarm_irq_enable(rtc->dev.parent, false);
	trace_rtc_alarm_irq_enable(0, 0);
}

/**
 * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
 * @rtc: rtc device
 * @timer: timer being removed
 *
 * Removes a timer from the rtc device's timerqueue and sets
 * the next alarm event appropriately.
 *
 * Clears the enabled bit on the removed timer.
 *
 * Must hold ops_lock for proper serialization of the timerqueue.
 */
static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
{
	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);

	timerqueue_del(&rtc->timerqueue, &timer->node);
	trace_rtc_timer_dequeue(timer);
	timer->enabled = 0;
	if (next == &timer->node) {
		struct rtc_wkalrm alarm;
		int err;

		next = timerqueue_getnext(&rtc->timerqueue);
		if (!next) {
			rtc_alarm_disable(rtc);
			return;
		}
		alarm.time = rtc_ktime_to_tm(next->expires);
		alarm.enabled = 1;
		err = __rtc_set_alarm(rtc, &alarm);
		if (err == -ETIME) {
			pm_stay_awake(rtc->dev.parent);
			schedule_work(&rtc->irqwork);
		}
	}
}

/**
 * rtc_timer_do_work - Expires rtc timers
 * @work: work item
 *
 * Expires rtc timers and reprograms the next alarm event if needed.
 * Called via the irqwork work item.
 *
 * Serializes access to the timerqueue via the ops_lock mutex.
 */
void rtc_timer_do_work(struct work_struct *work)
{
	struct rtc_timer *timer;
	struct timerqueue_node *next;
	ktime_t now;
	struct rtc_time tm;

	struct rtc_device *rtc =
		container_of(work, struct rtc_device, irqwork);

	mutex_lock(&rtc->ops_lock);
again:
	__rtc_read_time(rtc, &tm);
	now = rtc_tm_to_ktime(tm);
	while ((next = timerqueue_getnext(&rtc->timerqueue))) {
		if (next->expires > now)
			break;

		/* expire the timer and run its handler */
		timer = container_of(next, struct rtc_timer, node);
		timerqueue_del(&rtc->timerqueue, &timer->node);
		trace_rtc_timer_dequeue(timer);
		timer->enabled = 0;
		if (timer->func)
			timer->func(timer->rtc);

		trace_rtc_timer_fired(timer);

		/* re-enqueue periodic timers for their next expiry */
		if (ktime_to_ns(timer->period)) {
			timer->node.expires = ktime_add(timer->node.expires,
							timer->period);
			timer->enabled = 1;
			timerqueue_add(&rtc->timerqueue, &timer->node);
			trace_rtc_timer_enqueue(timer);
		}
	}

	/* Set the next hardware alarm */
	if (next) {
		struct rtc_wkalrm alarm;
		int err;
		int retry = 3;

		alarm.time = rtc_ktime_to_tm(next->expires);
		alarm.enabled = 1;
reprogram:
		err = __rtc_set_alarm(rtc, &alarm);
		if (err == -ETIME) {
			goto again;
		} else if (err) {
			if (retry-- > 0)
				goto reprogram;

			timer = container_of(next, struct rtc_timer, node);
			timerqueue_del(&rtc->timerqueue, &timer->node);
			trace_rtc_timer_dequeue(timer);
			timer->enabled = 0;
			dev_err(&rtc->dev, "__rtc_set_alarm: err=%d\n", err);
			goto again;
		}
	} else {
		rtc_alarm_disable(rtc);
	}

	pm_relax(rtc->dev.parent);
	mutex_unlock(&rtc->ops_lock);
}

/**
 * rtc_timer_init - Initializes an rtc_timer
 * @timer: timer to be initialized
 * @f: function pointer to be called when the timer fires
 * @rtc: pointer to the rtc_device the timer will be queued on
 *
 * Kernel interface to initialize an rtc_timer.
 */
void rtc_timer_init(struct rtc_timer *timer, void (*f)(struct rtc_device *r),
		    struct rtc_device *rtc)
{
	timerqueue_init(&timer->node);
	timer->enabled = 0;
	timer->func = f;
	timer->rtc = rtc;
}

/**
 * rtc_timer_start - Sets an rtc_timer to fire in the future
 * @rtc: rtc device to be used
 * @timer: timer being set
 * @expires: time at which to expire the timer
 * @period: period that the timer will recur (0 for a one-shot timer)
 *
 * Kernel interface to set an rtc_timer.
 */
int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer,
		    ktime_t expires, ktime_t period)
{
	int ret = 0;

	mutex_lock(&rtc->ops_lock);
	if (timer->enabled)
		rtc_timer_remove(rtc, timer);

	timer->node.expires = expires;
	timer->period = period;

	ret = rtc_timer_enqueue(rtc, timer);

	mutex_unlock(&rtc->ops_lock);
	return ret;
}

/**
 * rtc_timer_cancel - Stops an rtc_timer
 * @rtc: rtc device to be used
 * @timer: timer being stopped
 *
 * Kernel interface to cancel an rtc_timer.
 */
void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer)
{
	mutex_lock(&rtc->ops_lock);
	if (timer->enabled)
		rtc_timer_remove(rtc, timer);
	mutex_unlock(&rtc->ops_lock);
}
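
/*
 * Editor's usage sketch (not part of the original file): queue a one-shot
 * rtc_timer ten seconds from now on an opened rtc_device. The callback and
 * helper names are illustrative assumptions.
 */
#if 0	/* example only, not compiled */
static void example_timer_fn(struct rtc_device *rtc)
{
	dev_info(&rtc->dev, "example rtc_timer fired\n");
}

static int example_start_timer(struct rtc_device *rtc)
{
	static struct rtc_timer example_timer;
	struct rtc_time tm;
	int err;

	rtc_timer_init(&example_timer, example_timer_fn, rtc);

	err = rtc_read_time(rtc, &tm);
	if (err)
		return err;

	/* expire 10 seconds from the current RTC time, no period */
	return rtc_timer_start(rtc, &example_timer,
			       ktime_add(rtc_tm_to_ktime(tm), ktime_set(10, 0)),
			       0);
}
#endif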

/**
 * rtc_read_offset - Read the RTC clock offset
 * @rtc: rtc device to be used
 * @offset: returned offset, in parts per billion
 *
 * Kernel interface to read the current rtc clock offset correction.
 * Returns 0 on success, or a negative error code; -EINVAL if the
 * underlying driver does not implement read_offset().
 */
int rtc_read_offset(struct rtc_device *rtc, long *offset)
{
	int ret;

	if (!rtc->ops)
		return -ENODEV;

	if (!rtc->ops->read_offset)
		return -EINVAL;

	mutex_lock(&rtc->ops_lock);
	ret = rtc->ops->read_offset(rtc->dev.parent, offset);
	mutex_unlock(&rtc->ops_lock);

	trace_rtc_read_offset(*offset, ret);
	return ret;
}

/**
 * rtc_set_offset - Adjusts the duration of the average second
 * @rtc: rtc device to be used
 * @offset: the offset in parts per billion
 *
 * Some RTCs allow an adjustment to the average duration of a second
 * to compensate for differences in the actual clock rate due to
 * temperature, the crystal, capacitor, etc.
 *
 * The adjustment applied is as follows:
 *	t = t0 * (1 + offset * 1e-9)
 * where t0 is the measured length of one RTC second with offset = 0.
 *
 * Kernel interface to adjust an rtc clock offset.
 * Returns 0 on success, or a negative error code; -EINVAL if the
 * offset is not settable (or not implemented by the driver).
 */
int rtc_set_offset(struct rtc_device *rtc, long offset)
{
	int ret;

	if (!rtc->ops)
		return -ENODEV;

	if (!rtc->ops->set_offset)
		return -EINVAL;

	mutex_lock(&rtc->ops_lock);
	ret = rtc->ops->set_offset(rtc->dev.parent, offset);
	mutex_unlock(&rtc->ops_lock);

	trace_rtc_set_offset(offset, ret);
	return ret;
}
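
/*
 * Editor's usage sketch (not part of the original file): an RTC measured to
 * run about 10 ppm fast needs its average second lengthened, which under the
 * convention documented above means adding 10000 parts per billion. The
 * helper name and the measured drift are illustrative assumptions.
 */
#if 0	/* example only, not compiled */
static int example_trim_fast_rtc(struct rtc_device *rtc)
{
	long offset;
	int err;

	err = rtc_read_offset(rtc, &offset);	/* current correction, in ppb */
	if (err)
		return err;

	/* lengthen the RTC second by 10 ppm to slow a fast clock down */
	return rtc_set_offset(rtc, offset + 10000);
}
#endif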