// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx AMS (Analog Monitoring System) driver
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of_address.h>
#include <linux/iopoll.h>
#include <linux/delay.h>
#include <linux/io.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
#include <linux/iio/buffer.h>

#include "xilinx-ams.h"

static const unsigned int AMS_UNMASK_TIMEOUT = 500;

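/*
 * Register access helpers. ams->base maps the AMS control block,
 * ams->ps_base the PS Sysmon block and ams->pl_base the PL Sysmon block;
 * the PL block is reached through the bus ops in ams->pl_bus.
 */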
static inline void ams_read_reg(struct ams *ams, unsigned int offset, u32 *data)
{
	*data = readl(ams->base + offset);
}

static inline void ams_write_reg(struct ams *ams, unsigned int offset, u32 data)
{
	writel(data, ams->base + offset);
}

static inline void ams_update_reg(struct ams *ams, unsigned int offset,
				  u32 mask, u32 data)
{
	u32 val;

	ams_read_reg(ams, offset, &val);
	ams_write_reg(ams, offset, (val & ~mask) | (mask & data));
}

static inline void ams_ps_read_reg(struct ams *ams, unsigned int offset,
				   u32 *data)
{
	*data = readl(ams->ps_base + offset);
}

static inline void ams_ps_write_reg(struct ams *ams, unsigned int offset,
				    u32 data)
{
	writel(data, ams->ps_base + offset);
}

static inline void ams_ps_update_reg(struct ams *ams, unsigned int offset,
				     u32 mask, u32 data)
{
	u32 val;

	ams_ps_read_reg(ams, offset, &val);
	ams_ps_write_reg(ams, offset, (val & ~mask) | (data & mask));
}

static inline void ams_apb_pl_read_reg(struct ams *ams, unsigned int offset,
				       u32 *data)
{
	*data = readl(ams->pl_base + offset);
}

static inline void ams_apb_pl_write_reg(struct ams *ams, unsigned int offset,
					u32 data)
{
	writel(data, ams->pl_base + offset);
}

static inline void ams_apb_pl_update_reg(struct ams *ams, unsigned int offset,
					 u32 mask, u32 data)
{
	u32 val;

	ams_apb_pl_read_reg(ams, offset, &val);
	ams_apb_pl_write_reg(ams, offset, (val & ~mask) | (data & mask));
}

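/*
 * ams->intr_mask holds the interrupts the driver wants disabled (a set bit
 * means "masked"), while ams->masked_alarm holds alarms that have fired and
 * are temporarily masked until the unmask worker re-enables them. Both are
 * mirrored into the IER/IDR registers here. Callers other than the one-time
 * init path hold ams->lock.
 */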
static void ams_update_intrmask(struct ams *ams, u64 mask, u64 val)
{
	ams->intr_mask &= ~mask;
	ams->intr_mask |= (val & mask);

	ams_write_reg(ams, AMS_IER_0, ~(ams->intr_mask | ams->masked_alarm));
	ams_write_reg(ams, AMS_IER_1,
		      ~(ams->intr_mask >> AMS_ISR1_INTR_MASK_SHIFT));
	ams_write_reg(ams, AMS_IDR_0, ams->intr_mask | ams->masked_alarm);
	ams_write_reg(ams, AMS_IDR_1,
		      ams->intr_mask >> AMS_ISR1_INTR_MASK_SHIFT);
}

static void iio_ams_disable_all_alarm(struct ams *ams)
{
	/* Disable PS module alarms */
	if (ams->ps_base) {
		ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK,
				  AMS_REGCFG1_ALARM_MASK);
		ams_ps_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK,
				  AMS_REGCFG3_ALARM_MASK);
	}

	/* Disable PL module alarms */
	if (ams->pl_base) {
		ams->pl_bus->update(ams, AMS_REG_CONFIG1,
				    AMS_REGCFG1_ALARM_MASK,
				    AMS_REGCFG1_ALARM_MASK);
		ams->pl_bus->update(ams, AMS_REG_CONFIG3,
				    AMS_REGCFG3_ALARM_MASK,
				    AMS_REGCFG3_ALARM_MASK);
	}
}

static void iio_ams_update_alarm(struct ams *ams, unsigned long alarm_mask)
{
	u32 cfg;
	unsigned long flags;
	unsigned long pl_alarm_mask;

	if (ams->ps_base) {
		/* PS alarms 0-6 live in CONFIG1, alarms 7-12 in CONFIG3 */
		cfg = ~((alarm_mask & AMS_ISR0_ALARM_2_TO_0_MASK) <<
			AMS_CONF1_ALARM_2_TO_0_SHIFT);
		cfg &= ~((alarm_mask & AMS_ISR0_ALARM_6_TO_3_MASK) <<
			 AMS_CONF1_ALARM_6_TO_3_SHIFT);
		ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK,
				  cfg);

		cfg = ~((alarm_mask >> AMS_CONF3_ALARM_12_TO_7_SHIFT) &
			AMS_ISR0_ALARM_12_TO_7_MASK);
		ams_ps_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK,
				  cfg);
	}

	if (ams->pl_base) {
		/* PL alarm bits start at AMS_PL_ALARM_START in the combined mask */
		pl_alarm_mask = (alarm_mask >> AMS_PL_ALARM_START);

		cfg = ~((pl_alarm_mask & AMS_ISR0_ALARM_2_TO_0_MASK) <<
			AMS_CONF1_ALARM_2_TO_0_SHIFT);
		cfg &= ~((pl_alarm_mask & AMS_ISR0_ALARM_6_TO_3_MASK) <<
			 AMS_CONF1_ALARM_6_TO_3_SHIFT);
		ams->pl_bus->update(ams, AMS_REG_CONFIG1,
				    AMS_REGCFG1_ALARM_MASK, cfg);

		cfg = ~((pl_alarm_mask >> AMS_CONF3_ALARM_12_TO_7_SHIFT) &
			AMS_ISR0_ALARM_12_TO_7_MASK);
		ams->pl_bus->update(ams, AMS_REG_CONFIG3,
				    AMS_REGCFG3_ALARM_MASK, cfg);
	}

	/* Unmask interrupts for the enabled alarms, mask the rest */
	spin_lock_irqsave(&ams->lock, flags);
	ams_update_intrmask(ams, AMS_ISR0_ALARM_MASK, ~alarm_mask);
	spin_unlock_irqrestore(&ams->lock, flags);
}

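/*
 * Program the sequencer with every registered channel: switch it to its
 * default mode, write the scan mask into the SEQ channel registers, then
 * start continuous sequencing.
 */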
static void ams_enable_channel_sequence(struct ams *ams)
{
	int i;
	unsigned long long scan_mask;
	struct iio_dev *indio_dev = ams->indio_dev;

	/*
	 * Always include the calibration channels (bit 0 of the PS and PL
	 * sequencers) in addition to every registered channel.
	 */
	scan_mask = 1 | (1 << PS_SEQ_MAX);
	for (i = 0; i < indio_dev->num_channels; i++)
		scan_mask |= BIT(indio_dev->channels[i].scan_index);

	if (ams->ps_base) {
		/* Return the PS sequencer to default mode before reprogramming */
		ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
				  AMS_CONF1_SEQ_DEFAULT);

		/* Program the PS channels to be sampled */
		ams_ps_write_reg(ams, AMS_REG_SEQ_CH0,
				 scan_mask & AMS_REG_SEQ0_MASK);
		ams_ps_write_reg(ams, AMS_REG_SEQ_CH2, AMS_REG_SEQ2_MASK &
				 (scan_mask >> AMS_REG_SEQ2_MASK_SHIFT));

		/* Start the PS sequencer in continuous mode */
		ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
				  AMS_CONF1_SEQ_CONTINUOUS);
	}

	if (ams->pl_base) {
		/* Return the PL sequencer to default mode before reprogramming */
		ams->pl_bus->update(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
				    AMS_CONF1_SEQ_DEFAULT);

		/* PL channel bits start after the PS ones in the scan mask */
		scan_mask = (scan_mask >> PS_SEQ_MAX);
		ams->pl_bus->write(ams, AMS_REG_SEQ_CH0,
				   scan_mask & AMS_REG_SEQ0_MASK);
		ams->pl_bus->write(ams, AMS_REG_SEQ_CH2, AMS_REG_SEQ2_MASK &
				   (scan_mask >> AMS_REG_SEQ2_MASK_SHIFT));
		ams->pl_bus->write(ams, AMS_REG_SEQ_CH1, AMS_REG_SEQ1_MASK &
				   (scan_mask >> AMS_REG_SEQ1_MASK_SHIFT));

		/* Start the PL sequencer in continuous mode */
		ams->pl_bus->update(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
				    AMS_CONF1_SEQ_CONTINUOUS);
	}
}

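/*
 * Bring the PS and PL Sysmon blocks out of reset, wait until they report
 * ready, and leave them with all alarms and interrupts masked and any stale
 * interrupt status cleared.
 */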
static int iio_ams_init_device(struct ams *ams)
{
	int ret = 0;
	u32 reg;

	if (ams->ps_base) {
		/* Reset the PS Sysmon and wait for it to signal ready */
		ams_ps_write_reg(ams, AMS_VP_VN, AMS_PS_RESET_VALUE);

		ret = readl_poll_timeout(ams->base + AMS_PS_CSTS, reg,
					 (reg & AMS_PS_CSTS_PS_READY) ==
					 AMS_PS_CSTS_PS_READY, 0,
					 AMS_INIT_TIMEOUT);
		if (ret)
			return ret;

		/* Put the sequencer in default mode */
		ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
				  AMS_CONF1_SEQ_DEFAULT);
	}

	if (ams->pl_base) {
		/* Reset the PL Sysmon and wait until it is accessible */
		ams->pl_bus->write(ams, AMS_VP_VN, AMS_PL_RESET_VALUE);

		ret = readl_poll_timeout(ams->base + AMS_PL_CSTS, reg,
					 (reg & AMS_PL_CSTS_ACCESS_MASK) ==
					 AMS_PL_CSTS_ACCESS_MASK, 0,
					 AMS_INIT_TIMEOUT);
		if (ret)
			return ret;

		/* Put the sequencer in default mode */
		ams->pl_bus->update(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
				    AMS_CONF1_SEQ_DEFAULT);
	}

	iio_ams_disable_all_alarm(ams);

	/* Mask all interrupts */
	ams_update_intrmask(ams, ~0, ~0);

	/* Clear any pending interrupt status */
	ams_write_reg(ams, AMS_ISR_0, AMS_ISR0_ALARM_MASK);
	ams_write_reg(ams, AMS_ISR_1, AMS_ISR1_ALARM_MASK);

	return ret;
}

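/*
 * The AMS control block supplies (VCC_PSPLL, VCCINT, VCCBRAM, ...) are not
 * part of the automatic sequence; they are read by temporarily switching the
 * PS sequencer to single-channel mode and selecting the matching channel.
 */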
static void ams_enable_single_channel(struct ams *ams, unsigned int offset)
{
	u8 channel_num = 0;

	switch (offset) {
	case AMS_VCC_PSPLL0:
		channel_num = AMS_VCC_PSPLL0_CH;
		break;
	case AMS_VCC_PSPLL3:
		channel_num = AMS_VCC_PSPLL3_CH;
		break;
	case AMS_VCCINT:
		channel_num = AMS_VCCINT_CH;
		break;
	case AMS_VCCBRAM:
		channel_num = AMS_VCCBRAM_CH;
		break;
	case AMS_VCCAUX:
		channel_num = AMS_VCCAUX_CH;
		break;
	case AMS_PSDDRPLL:
		channel_num = AMS_PSDDRPLL_CH;
		break;
	case AMS_PSINTFPDDR:
		channel_num = AMS_PSINTFPDDR_CH;
		break;
	default:
		break;
	}

	/* Switch the sequencer to single channel mode */
	ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
			  AMS_CONF1_SEQ_SINGLE_CHANNEL);

	/* Select the channel and give the conversion time to complete */
	ams_ps_update_reg(ams, AMS_REG_CONFIG0, AMS_CONF0_CHANNEL_NUM_MASK,
			  channel_num);
	mdelay(1);
}

static void ams_read_vcc_reg(struct ams *ams, unsigned int offset, u32 *data)
{
	ams_enable_single_channel(ams, offset);
	ams_read_reg(ams, offset, data);
	ams_enable_channel_sequence(ams);
}

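/*
 * scan_index encodes which block a channel belongs to: PS channels use
 * 0 .. PS_SEQ_MAX - 1, PL channels start at PS_SEQ_MAX, and the AMS control
 * block channels start at PS_SEQ_MAX * 3 and are read via single-channel
 * mode.
 */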
static int ams_read_raw(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val, int *val2, long mask)
{
	struct ams *ams = iio_priv(indio_dev);

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		mutex_lock(&ams->mutex);
		if (chan->scan_index >= (PS_SEQ_MAX * 3))
			ams_read_vcc_reg(ams, chan->address, val);
		else if (chan->scan_index >= PS_SEQ_MAX)
			ams->pl_bus->read(ams, chan->address, val);
		else
			ams_ps_read_reg(ams, chan->address, val);
		mutex_unlock(&ams->mutex);

		*val2 = 0;
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_SCALE:
		switch (chan->type) {
		case IIO_VOLTAGE:
			switch (chan->address) {
			case AMS_SUPPLY1:
			case AMS_SUPPLY2:
			case AMS_SUPPLY3:
			case AMS_SUPPLY4:
				*val = AMS_SUPPLY_SCALE_3VOLT;
				break;
			case AMS_SUPPLY5:
			case AMS_SUPPLY6:
				if (chan->scan_index < PS_SEQ_MAX)
					*val = AMS_SUPPLY_SCALE_6VOLT;
				else
					*val = AMS_SUPPLY_SCALE_3VOLT;
				break;
			case AMS_SUPPLY7:
			case AMS_SUPPLY8:
				*val = AMS_SUPPLY_SCALE_6VOLT;
				break;
			case AMS_SUPPLY9:
			case AMS_SUPPLY10:
				if (chan->scan_index < PS_SEQ_MAX)
					*val = AMS_SUPPLY_SCALE_3VOLT;
				else
					*val = AMS_SUPPLY_SCALE_6VOLT;
				break;
			case AMS_VREFP:
			case AMS_VREFN:
				*val = AMS_SUPPLY_SCALE_3VOLT;
				break;
			default:
				if (chan->scan_index >= (PS_SEQ_MAX * 3))
					*val = AMS_SUPPLY_SCALE_3VOLT;
				else
					*val = AMS_SUPPLY_SCALE_1VOLT;
				break;
			}
			*val2 = AMS_SUPPLY_SCALE_DIV_BIT;
			return IIO_VAL_FRACTIONAL_LOG2;
		case IIO_TEMP:
			*val = AMS_TEMP_SCALE;
			*val2 = AMS_TEMP_SCALE_DIV_BIT;
			return IIO_VAL_FRACTIONAL_LOG2;
		default:
			return -EINVAL;
		}
	case IIO_CHAN_INFO_OFFSET:
		/* Only the temperature channels have an offset */
		*val = AMS_TEMP_OFFSET;
		*val2 = 0;
		return IIO_VAL_INT;
	}

	return -EINVAL;
}

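/*
 * Alarm threshold registers: the rising (upper) threshold sits at the base
 * alarm offset, the falling (lower) threshold at +AMS_ALARM_THRESOLD_OFF_10
 * for alarms below SUPPLY7 and at +AMS_ALARM_THRESOLD_OFF_20 for the rest.
 */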
static int ams_get_alarm_offset(int scan_index, enum iio_event_direction dir)
{
	int offset = 0;

	if (scan_index >= PS_SEQ_MAX)
		scan_index -= PS_SEQ_MAX;

	if (dir == IIO_EV_DIR_FALLING) {
		if (scan_index < AMS_SEQ_SUPPLY7)
			offset = AMS_ALARM_THRESOLD_OFF_10;
		else
			offset = AMS_ALARM_THRESOLD_OFF_20;
	}

	switch (scan_index) {
	case AMS_SEQ_TEMP:
		return AMS_ALARM_TEMP + offset;
	case AMS_SEQ_SUPPLY1:
		return AMS_ALARM_SUPPLY1 + offset;
	case AMS_SEQ_SUPPLY2:
		return AMS_ALARM_SUPPLY2 + offset;
	case AMS_SEQ_SUPPLY3:
		return AMS_ALARM_SUPPLY3 + offset;
	case AMS_SEQ_SUPPLY4:
		return AMS_ALARM_SUPPLY4 + offset;
	case AMS_SEQ_SUPPLY5:
		return AMS_ALARM_SUPPLY5 + offset;
	case AMS_SEQ_SUPPLY6:
		return AMS_ALARM_SUPPLY6 + offset;
	case AMS_SEQ_SUPPLY7:
		return AMS_ALARM_SUPPLY7 + offset;
	case AMS_SEQ_SUPPLY8:
		return AMS_ALARM_SUPPLY8 + offset;
	case AMS_SEQ_SUPPLY9:
		return AMS_ALARM_SUPPLY9 + offset;
	case AMS_SEQ_SUPPLY10:
		return AMS_ALARM_SUPPLY10 + offset;
	case AMS_SEQ_VCCAMS:
		return AMS_ALARM_VCCAMS + offset;
	case AMS_SEQ_TEMP_REMOTE:
		return AMS_ALARM_TEMP_REMOTE + offset;
	}

	return 0;
}

static const struct iio_chan_spec *ams_event_to_channel(struct iio_dev *indio_dev,
							 u32 event)
{
	int scan_index = 0, i;

	if (event >= AMS_PL_ALARM_START) {
		event -= AMS_PL_ALARM_START;
		scan_index = PS_SEQ_MAX;
	}

	switch (event) {
	case AMS_ALARM_BIT_TEMP:
		scan_index += AMS_SEQ_TEMP;
		break;
	case AMS_ALARM_BIT_SUPPLY1:
		scan_index += AMS_SEQ_SUPPLY1;
		break;
	case AMS_ALARM_BIT_SUPPLY2:
		scan_index += AMS_SEQ_SUPPLY2;
		break;
	case AMS_ALARM_BIT_SUPPLY3:
		scan_index += AMS_SEQ_SUPPLY3;
		break;
	case AMS_ALARM_BIT_SUPPLY4:
		scan_index += AMS_SEQ_SUPPLY4;
		break;
	case AMS_ALARM_BIT_SUPPLY5:
		scan_index += AMS_SEQ_SUPPLY5;
		break;
	case AMS_ALARM_BIT_SUPPLY6:
		scan_index += AMS_SEQ_SUPPLY6;
		break;
	case AMS_ALARM_BIT_SUPPLY7:
		scan_index += AMS_SEQ_SUPPLY7;
		break;
	case AMS_ALARM_BIT_SUPPLY8:
		scan_index += AMS_SEQ_SUPPLY8;
		break;
	case AMS_ALARM_BIT_SUPPLY9:
		scan_index += AMS_SEQ_SUPPLY9;
		break;
	case AMS_ALARM_BIT_SUPPLY10:
		scan_index += AMS_SEQ_SUPPLY10;
		break;
	case AMS_ALARM_BIT_VCCAMS:
		scan_index += AMS_SEQ_VCCAMS;
		break;
	case AMS_ALARM_BIT_TEMP_REMOTE:
		scan_index += AMS_SEQ_TEMP_REMOTE;
		break;
	}

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].scan_index == scan_index)
			break;

	return &indio_dev->channels[i];
}

static int ams_get_alarm_mask(int scan_index)
{
	int bit = 0;

	if (scan_index >= PS_SEQ_MAX) {
		bit = AMS_PL_ALARM_START;
		scan_index -= PS_SEQ_MAX;
	}

	switch (scan_index) {
	case AMS_SEQ_TEMP:
		return BIT(AMS_ALARM_BIT_TEMP + bit);
	case AMS_SEQ_SUPPLY1:
		return BIT(AMS_ALARM_BIT_SUPPLY1 + bit);
	case AMS_SEQ_SUPPLY2:
		return BIT(AMS_ALARM_BIT_SUPPLY2 + bit);
	case AMS_SEQ_SUPPLY3:
		return BIT(AMS_ALARM_BIT_SUPPLY3 + bit);
	case AMS_SEQ_SUPPLY4:
		return BIT(AMS_ALARM_BIT_SUPPLY4 + bit);
	case AMS_SEQ_SUPPLY5:
		return BIT(AMS_ALARM_BIT_SUPPLY5 + bit);
	case AMS_SEQ_SUPPLY6:
		return BIT(AMS_ALARM_BIT_SUPPLY6 + bit);
	case AMS_SEQ_SUPPLY7:
		return BIT(AMS_ALARM_BIT_SUPPLY7 + bit);
	case AMS_SEQ_SUPPLY8:
		return BIT(AMS_ALARM_BIT_SUPPLY8 + bit);
	case AMS_SEQ_SUPPLY9:
		return BIT(AMS_ALARM_BIT_SUPPLY9 + bit);
	case AMS_SEQ_SUPPLY10:
		return BIT(AMS_ALARM_BIT_SUPPLY10 + bit);
	case AMS_SEQ_VCCAMS:
		return BIT(AMS_ALARM_BIT_VCCAMS + bit);
	case AMS_SEQ_TEMP_REMOTE:
		return BIT(AMS_ALARM_BIT_TEMP_REMOTE + bit);
	}

	return 0;
}

static int ams_read_event_config(struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan,
				 enum iio_event_type type,
				 enum iio_event_direction dir)
{
	struct ams *ams = iio_priv(indio_dev);

	return (ams->alarm_mask & ams_get_alarm_mask(chan->scan_index)) ? 1 : 0;
}

static int ams_write_event_config(struct iio_dev *indio_dev,
				  const struct iio_chan_spec *chan,
				  enum iio_event_type type,
				  enum iio_event_direction dir,
				  int state)
{
	struct ams *ams = iio_priv(indio_dev);
	unsigned int alarm;

	alarm = ams_get_alarm_mask(chan->scan_index);

	mutex_lock(&ams->mutex);

	if (state)
		ams->alarm_mask |= alarm;
	else
		ams->alarm_mask &= ~alarm;

	iio_ams_update_alarm(ams, ams->alarm_mask);

	mutex_unlock(&ams->mutex);

	return 0;
}

static int ams_read_event_value(struct iio_dev *indio_dev,
				const struct iio_chan_spec *chan,
				enum iio_event_type type,
				enum iio_event_direction dir,
				enum iio_event_info info, int *val, int *val2)
{
	struct ams *ams = iio_priv(indio_dev);
	unsigned int offset = ams_get_alarm_offset(chan->scan_index, dir);

	mutex_lock(&ams->mutex);

	if (chan->scan_index >= PS_SEQ_MAX)
		ams->pl_bus->read(ams, offset, val);
	else
		ams_ps_read_reg(ams, offset, val);

	mutex_unlock(&ams->mutex);

	*val2 = 0;
	return IIO_VAL_INT;
}

static int ams_write_event_value(struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan,
				 enum iio_event_type type,
				 enum iio_event_direction dir,
				 enum iio_event_info info, int val, int val2)
{
	struct ams *ams = iio_priv(indio_dev);
	unsigned int offset;

	mutex_lock(&ams->mutex);

	/* Switch the temperature alarm to direct threshold mode */
	if (chan->type == IIO_TEMP) {
		offset = ams_get_alarm_offset(chan->scan_index,
					      IIO_EV_DIR_FALLING);

		if (chan->scan_index >= PS_SEQ_MAX)
			ams->pl_bus->update(ams, offset,
					    AMS_ALARM_THR_DIRECT_MASK,
					    AMS_ALARM_THR_DIRECT_MASK);
		else
			ams_ps_update_reg(ams, offset,
					  AMS_ALARM_THR_DIRECT_MASK,
					  AMS_ALARM_THR_DIRECT_MASK);
	}

	offset = ams_get_alarm_offset(chan->scan_index, dir);
	if (chan->scan_index >= PS_SEQ_MAX)
		ams->pl_bus->write(ams, offset, val);
	else
		ams_ps_write_reg(ams, offset, val);

	mutex_unlock(&ams->mutex);

	return 0;
}

static void ams_handle_event(struct iio_dev *indio_dev, u32 event)
{
	const struct iio_chan_spec *chan;

	chan = ams_event_to_channel(indio_dev, event);

	if (chan->type == IIO_TEMP) {
		/*
		 * The temperature channel only supports over-temperature
		 * events.
		 */
		iio_push_event(indio_dev,
			       IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
						    IIO_EV_TYPE_THRESH,
						    IIO_EV_DIR_RISING),
			       iio_get_time_ns(indio_dev));
	} else {
		/*
		 * For other channels we don't know whether it is a rising or
		 * falling threshold event. Userspace has to check the channel
		 * value if it wants to know.
		 */
		iio_push_event(indio_dev,
			       IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
						    IIO_EV_TYPE_THRESH,
						    IIO_EV_DIR_EITHER),
			       iio_get_time_ns(indio_dev));
	}
}

static void ams_handle_events(struct iio_dev *indio_dev, unsigned long events)
{
	unsigned int bit;

	for_each_set_bit(bit, &events, AMS_NO_OF_ALARMS)
		ams_handle_event(indio_dev, bit);
}

/*
 * The alarm interrupts are level sensitive: as long as a threshold condition
 * persists, re-enabling the interrupt would fire it again immediately. The
 * interrupt handler therefore masks every active alarm and schedules this
 * worker, which polls the status register and only unmasks an alarm once its
 * condition has gone away. Alarms that are still active stay masked and the
 * worker reschedules itself.
 */
static void ams_unmask_worker(struct work_struct *work)
{
	struct ams *ams = container_of(work, struct ams, ams_unmask_work.work);
	unsigned int status, unmask;

	spin_lock_irq(&ams->lock);

	ams_read_reg(ams, AMS_ISR_0, &status);

	/* Unmask the alarms whose condition has cleared */
	unmask = (ams->masked_alarm ^ status) & ams->masked_alarm;

	/* Also clear the status of alarms whose interrupt is disabled */
	unmask |= ams->intr_mask;

	/* Keep masking only the alarms that are still active */
	ams->masked_alarm &= status;

	/* ...and drop any that are masked out anyway */
	ams->masked_alarm &= ~ams->intr_mask;

	/* Clear the interrupt status before unmasking */
	ams_write_reg(ams, AMS_ISR_0, unmask);

	ams_update_intrmask(ams, 0, 0);

	spin_unlock_irq(&ams->lock);

	/* If some alarms are still active, check again later */
	if (ams->masked_alarm)
		schedule_delayed_work(&ams->ams_unmask_work,
				      msecs_to_jiffies(AMS_UNMASK_TIMEOUT));
}

static irqreturn_t ams_iio_irq(int irq, void *data)
{
	unsigned int isr0, isr1;
	struct iio_dev *indio_dev = data;
	struct ams *ams = iio_priv(indio_dev);

	spin_lock(&ams->lock);

	ams_read_reg(ams, AMS_ISR_0, &isr0);
	ams_read_reg(ams, AMS_ISR_1, &isr1);

	/* Only process alarms that are not masked */
	isr0 &= ~((ams->intr_mask & AMS_ISR0_ALARM_MASK) | ams->masked_alarm);
	isr1 &= ~(ams->intr_mask >> AMS_ISR1_INTR_MASK_SHIFT);

	/* Clear the interrupt status */
	ams_write_reg(ams, AMS_ISR_0, isr0);
	ams_write_reg(ams, AMS_ISR_1, isr1);

	if (isr0) {
		/*
		 * Mask the alarms that just fired; the unmask worker will
		 * re-enable them once their condition clears.
		 */
		ams->masked_alarm |= isr0;
		ams_update_intrmask(ams, 0, 0);

		ams_handle_events(indio_dev, isr0);

		schedule_delayed_work(&ams->ams_unmask_work,
				      msecs_to_jiffies(AMS_UNMASK_TIMEOUT));
	}

	spin_unlock(&ams->lock);

	return IRQ_HANDLED;
}

static const struct iio_event_spec ams_temp_events[] = {
	{
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_RISING,
		.mask_separate = BIT(IIO_EV_INFO_ENABLE) |
				 BIT(IIO_EV_INFO_VALUE),
	},
};

static const struct iio_event_spec ams_voltage_events[] = {
	{
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_RISING,
		.mask_separate = BIT(IIO_EV_INFO_VALUE),
	}, {
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_FALLING,
		.mask_separate = BIT(IIO_EV_INFO_VALUE),
	}, {
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_EITHER,
		.mask_separate = BIT(IIO_EV_INFO_ENABLE),
	},
};

static const struct iio_chan_spec ams_ps_channels[] = {
	AMS_PS_CHAN_TEMP(AMS_SEQ_TEMP, AMS_TEMP, "ps_temp"),
	AMS_PS_CHAN_TEMP(AMS_SEQ_TEMP_REMOTE, AMS_TEMP_REMOTE, "remote_temp"),
	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY1, AMS_SUPPLY1, "vccpsintlp"),
	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY2, AMS_SUPPLY2, "vccpsintfp"),
	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY3, AMS_SUPPLY3, "vccpsaux"),
	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY4, AMS_SUPPLY4, "vccpsddr"),
	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY5, AMS_SUPPLY5, "vccpsio3"),
	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY6, AMS_SUPPLY6, "vccpsio0"),
	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY7, AMS_SUPPLY7, "vccpsio1"),
	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY8, AMS_SUPPLY8, "vccpsio2"),
	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY9, AMS_SUPPLY9, "psmgtravcc"),
	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY10, AMS_SUPPLY10, "psmgtravtt"),
	AMS_PS_CHAN_VOLTAGE(AMS_SEQ_VCCAMS, AMS_VCCAMS, "vccams"),
};

static const struct iio_chan_spec ams_pl_channels[] = {
	AMS_PL_CHAN_TEMP(AMS_SEQ_TEMP, AMS_TEMP, "pl_temp"),
	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY1, AMS_SUPPLY1, "vccint", true),
	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY2, AMS_SUPPLY2, "vccaux", true),
	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VREFP, AMS_VREFP, "vccvrefp", false),
	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VREFN, AMS_VREFN, "vccvrefn", false),
	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY3, AMS_SUPPLY3, "vccbram", true),
	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY4, AMS_SUPPLY4, "vccplintlp", true),
	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY5, AMS_SUPPLY5, "vccplintfp", true),
	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY6, AMS_SUPPLY6, "vccplaux", true),
	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VCCAMS, AMS_VCCAMS, "vccams", true),
	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VP_VN, AMS_VP_VN, "vccvpvn", false),
	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY7, AMS_SUPPLY7, "vuser0", true),
	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY8, AMS_SUPPLY8, "vuser1", true),
	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY9, AMS_SUPPLY9, "vuser2", true),
	AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY10, AMS_SUPPLY10, "vuser3", true),
	AMS_PL_AUX_CHAN_VOLTAGE(0, "vccaux0"),
	AMS_PL_AUX_CHAN_VOLTAGE(1, "vccaux1"),
	AMS_PL_AUX_CHAN_VOLTAGE(2, "vccaux2"),
	AMS_PL_AUX_CHAN_VOLTAGE(3, "vccaux3"),
	AMS_PL_AUX_CHAN_VOLTAGE(4, "vccaux4"),
	AMS_PL_AUX_CHAN_VOLTAGE(5, "vccaux5"),
	AMS_PL_AUX_CHAN_VOLTAGE(6, "vccaux6"),
	AMS_PL_AUX_CHAN_VOLTAGE(7, "vccaux7"),
	AMS_PL_AUX_CHAN_VOLTAGE(8, "vccaux8"),
	AMS_PL_AUX_CHAN_VOLTAGE(9, "vccaux9"),
	AMS_PL_AUX_CHAN_VOLTAGE(10, "vccaux10"),
	AMS_PL_AUX_CHAN_VOLTAGE(11, "vccaux11"),
	AMS_PL_AUX_CHAN_VOLTAGE(12, "vccaux12"),
	AMS_PL_AUX_CHAN_VOLTAGE(13, "vccaux13"),
	AMS_PL_AUX_CHAN_VOLTAGE(14, "vccaux14"),
	AMS_PL_AUX_CHAN_VOLTAGE(15, "vccaux15"),
};

static const struct iio_chan_spec ams_ctrl_channels[] = {
	AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCC_PSPLL, AMS_VCC_PSPLL0, "vcc_pspll0"),
	AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCC_PSBATT, AMS_VCC_PSPLL3, "vcc_psbatt"),
	AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCINT, AMS_VCCINT, "vccint"),
	AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCBRAM, AMS_VCCBRAM, "vccbram"),
	AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCAUX, AMS_VCCAUX, "vccaux"),
	AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_PSDDRPLL, AMS_PSDDRPLL, "vcc_psddrpll"),
	AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_INTDDR, AMS_PSINTFPDDR, "vccpsintfpddr"),
};

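/*
 * Populate the channel array for one device-tree node: map the PS or PL
 * Sysmon registers for the matching child node, or add the AMS control block
 * channels for the parent node. Returns the number of channels added or a
 * negative error code.
 */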
static int ams_init_module(struct iio_dev *indio_dev, struct device_node *np,
			   struct iio_chan_spec *channels)
{
	struct ams *ams = iio_priv(indio_dev);
	struct device_node *chan_node, *child;
	int ret, num_channels = 0;
	unsigned int reg;

	if (of_device_is_compatible(np, "xlnx,zynqmp-ams-ps")) {
		ams->ps_base = of_iomap(np, 0);
		if (!ams->ps_base)
			return -ENXIO;

		/* Add the PS channels */
		memcpy(channels + num_channels, ams_ps_channels,
		       sizeof(ams_ps_channels));
		num_channels += ARRAY_SIZE(ams_ps_channels);
	} else if (of_device_is_compatible(np, "xlnx,zynqmp-ams-pl")) {
		ams->pl_base = of_iomap(np, 0);
		if (!ams->pl_base)
			return -ENXIO;

		/* Add the fixed PL channels */
		memcpy(channels + num_channels, ams_pl_channels,
		       AMS_PL_MAX_FIXED_CHANNEL * sizeof(*channels));
		num_channels += AMS_PL_MAX_FIXED_CHANNEL;

		/* Add the external (auxiliary) PL channels described in DT */
		chan_node = of_get_child_by_name(np, "xlnx,ext-channels");
		if (chan_node) {
			for_each_child_of_node(chan_node, child) {
				ret = of_property_read_u32(child, "reg", &reg);
				if (ret || reg > AMS_PL_MAX_EXT_CHANNEL)
					continue;

				memcpy(&channels[num_channels],
				       &ams_pl_channels[reg +
						AMS_PL_MAX_FIXED_CHANNEL],
				       sizeof(*channels));

				if (of_property_read_bool(child,
							  "xlnx,bipolar"))
					channels[num_channels].scan_type.sign = 's';

				num_channels += 1;
			}
		}
		of_node_put(chan_node);
	} else if (of_device_is_compatible(np, "xlnx,zynqmp-ams")) {
		/* Add the AMS control block channels */
		memcpy(channels + num_channels, ams_ctrl_channels,
		       sizeof(ams_ctrl_channels));
		num_channels += ARRAY_SIZE(ams_ctrl_channels);
	} else {
		return -EINVAL;
	}

	return num_channels;
}

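/*
 * Walk the AMS node and its children, collect all channels into a scratch
 * array, assign per-type channel numbers, program default alarm thresholds,
 * and hand a trimmed copy of the array to the IIO core.
 */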
static int ams_parse_dt(struct iio_dev *indio_dev, struct platform_device *pdev)
{
	struct ams *ams = iio_priv(indio_dev);
	struct iio_chan_spec *ams_channels, *dev_channels;
	struct device_node *child_node = NULL, *np = pdev->dev.of_node;
	int ret, chan_vol = 0, chan_temp = 0, i, rising_off, falling_off;
	unsigned int num_channels = 0;

	/* Scratch buffer large enough for every possible channel */
	ams_channels = kzalloc(sizeof(ams_ps_channels) +
			       sizeof(ams_pl_channels) +
			       sizeof(ams_ctrl_channels), GFP_KERNEL);
	if (!ams_channels)
		return -ENOMEM;

	if (of_device_is_available(np)) {
		ret = ams_init_module(indio_dev, np, ams_channels);
		if (ret < 0) {
			kfree(ams_channels);
			return ret;
		}

		num_channels += ret;
	}

	for_each_child_of_node(np, child_node) {
		if (of_device_is_available(child_node)) {
			ret = ams_init_module(indio_dev, child_node,
					      ams_channels + num_channels);
			if (ret < 0) {
				of_node_put(child_node);
				kfree(ams_channels);
				return ret;
			}

			num_channels += ret;
		}
	}

	for (i = 0; i < num_channels; i++) {
		if (ams_channels[i].type == IIO_VOLTAGE)
			ams_channels[i].channel = chan_vol++;
		else
			ams_channels[i].channel = chan_temp++;

		if (ams_channels[i].scan_index < (PS_SEQ_MAX * 3)) {
			/* Program the default alarm thresholds */
			falling_off =
				ams_get_alarm_offset(ams_channels[i].scan_index,
						     IIO_EV_DIR_FALLING);
			rising_off =
				ams_get_alarm_offset(ams_channels[i].scan_index,
						     IIO_EV_DIR_RISING);
			if (ams_channels[i].scan_index >= PS_SEQ_MAX) {
				ams->pl_bus->write(ams, falling_off,
						   AMS_ALARM_THR_MIN);
				ams->pl_bus->write(ams, rising_off,
						   AMS_ALARM_THR_MAX);
			} else {
				ams_ps_write_reg(ams, falling_off,
						 AMS_ALARM_THR_MIN);
				ams_ps_write_reg(ams, rising_off,
						 AMS_ALARM_THR_MAX);
			}
		}
	}

	dev_channels = devm_kzalloc(&pdev->dev, sizeof(*dev_channels) *
				    num_channels, GFP_KERNEL);
	if (!dev_channels) {
		kfree(ams_channels);
		return -ENOMEM;
	}

	memcpy(dev_channels, ams_channels,
	       sizeof(*ams_channels) * num_channels);
	kfree(ams_channels);
	indio_dev->channels = dev_channels;
	indio_dev->num_channels = num_channels;

	return 0;
}

static const struct iio_info iio_pl_info = {
	.read_raw = &ams_read_raw,
	.read_event_config = &ams_read_event_config,
	.write_event_config = &ams_write_event_config,
	.read_event_value = &ams_read_event_value,
	.write_event_value = &ams_write_event_value,
};

static const struct ams_pl_bus_ops ams_pl_apb = {
	.read = ams_apb_pl_read_reg,
	.write = ams_apb_pl_write_reg,
	.update = ams_apb_pl_update_reg,
};

static const struct of_device_id ams_of_match_table[] = {
	{ .compatible = "xlnx,zynqmp-ams", .data = &ams_pl_apb },
	{ }
};
MODULE_DEVICE_TABLE(of, ams_of_match_table);

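/*
 * Probe: map the AMS control registers, enable the clock, reset and
 * initialise the hardware, build the channel list from the device tree,
 * start the channel sequencer and register the interrupt and IIO device.
 */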
static int ams_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;
	struct ams *ams;
	struct resource *res;
	const struct of_device_id *id;
	int ret;

	if (!pdev->dev.of_node)
		return -ENODEV;

	id = of_match_node(ams_of_match_table, pdev->dev.of_node);
	if (!id)
		return -ENODEV;

	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*ams));
	if (!indio_dev)
		return -ENOMEM;

	ams = iio_priv(indio_dev);
	ams->indio_dev = indio_dev;
	ams->pl_bus = id->data;
	mutex_init(&ams->mutex);
	spin_lock_init(&ams->lock);

	indio_dev->dev.parent = &pdev->dev;
	indio_dev->dev.of_node = pdev->dev.of_node;
	indio_dev->name = "ams";

	indio_dev->info = &iio_pl_info;
	indio_dev->modes = INDIO_DIRECT_MODE;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ams-base");
	ams->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ams->base))
		return PTR_ERR(ams->base);

	INIT_DELAYED_WORK(&ams->ams_unmask_work, ams_unmask_worker);

	ams->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(ams->clk))
		return PTR_ERR(ams->clk);
	clk_prepare_enable(ams->clk);

	ret = iio_ams_init_device(ams);
	if (ret) {
		dev_err(&pdev->dev, "failed to initialize AMS\n");
		goto clk_disable;
	}

	ret = ams_parse_dt(indio_dev, pdev);
	if (ret) {
		dev_err(&pdev->dev, "failure in parsing DT\n");
		goto clk_disable;
	}

	ams_enable_channel_sequence(ams);

	ams->irq = platform_get_irq_byname(pdev, "ams-irq");
	ret = devm_request_irq(&pdev->dev, ams->irq, &ams_iio_irq, 0,
			       "ams-irq", indio_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to register interrupt\n");
		goto clk_disable;
	}

	platform_set_drvdata(pdev, indio_dev);

	return iio_device_register(indio_dev);

clk_disable:
	clk_disable_unprepare(ams->clk);
	return ret;
}

static int ams_remove(struct platform_device *pdev)
{
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
	struct ams *ams = iio_priv(indio_dev);

	cancel_delayed_work(&ams->ams_unmask_work);

	iio_device_unregister(indio_dev);
	clk_disable_unprepare(ams->clk);
	return 0;
}

static int __maybe_unused ams_suspend(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ams *ams = iio_priv(indio_dev);

	clk_disable_unprepare(ams->clk);

	return 0;
}

static int __maybe_unused ams_resume(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ams *ams = iio_priv(indio_dev);

	clk_prepare_enable(ams->clk);
	return 0;
}

static SIMPLE_DEV_PM_OPS(ams_pm_ops, ams_suspend, ams_resume);

static struct platform_driver ams_driver = {
	.probe = ams_probe,
	.remove = ams_remove,
	.driver = {
		.name = "ams",
		.pm = &ams_pm_ops,
		.of_match_table = ams_of_match_table,
	},
};
module_platform_driver(ams_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Rajnikant Bhojani <rajnikant.bhojani@xilinx.com>");