// SPDX-License-Identifier: GPL-2.0
/*
 * Intel(R) Trace Hub Global Trace Hub (GTH) driver
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/pm_runtime.h>

#include "intel_th.h"
#include "gth.h"

struct gth_device;
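
/**
 * struct gth_output - GTH view on an output port
 * @gth:	backlink to the GTH device
 * @output:	link to the output device's output descriptor
 * @index:	output port number
 * @port_type:	one of GTH_* port type values
 * @master:	bitmap of masters configured for this output
 */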
struct gth_output {
        struct gth_device *gth;
        struct intel_th_output *output;
        unsigned int index;
        unsigned int port_type;
        DECLARE_BITMAP(master, TH_CONFIGURABLE_MASTERS + 1);
};
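
/**
 * struct gth_device - GTH device
 * @dev:	driver core's device
 * @base:	register window base address
 * @output_group:	attributes describing output ports
 * @master_group:	attributes describing master assignments
 * @output:	output ports
 * @master:	master to output port assignments
 * @gth_lock:	serializes accesses to GTH bits
 */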
struct gth_device {
        struct device *dev;
        void __iomem *base;

        struct attribute_group output_group;
        struct attribute_group master_group;
        struct gth_output output[TH_POSSIBLE_OUTPUTS];
        signed char master[TH_CONFIGURABLE_MASTERS + 1];
        spinlock_t gth_lock;
};
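
/*
 * Per-port output configuration is packed into the GTHOPT0/1 registers,
 * eight bits per port (ports 0-3 in GTHOPT0, 4-7 in GTHOPT1); the SMC
 * frequency takes 16 bits per port, two ports per SMCR register. The
 * helpers below read and write these per-port fields.
 */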
static void gth_output_set(struct gth_device *gth, int port,
                           unsigned int config)
{
        unsigned long reg = port & 4 ? REG_GTH_GTHOPT1 : REG_GTH_GTHOPT0;
        u32 val;
        int shift = (port & 3) * 8;

        val = ioread32(gth->base + reg);
        val &= ~(0xff << shift);
        val |= config << shift;
        iowrite32(val, gth->base + reg);
}

static unsigned int gth_output_get(struct gth_device *gth, int port)
{
        unsigned long reg = port & 4 ? REG_GTH_GTHOPT1 : REG_GTH_GTHOPT0;
        u32 val;
        int shift = (port & 3) * 8;

        val = ioread32(gth->base + reg);
        val &= 0xff << shift;
        val >>= shift;

        return val;
}

static void gth_smcfreq_set(struct gth_device *gth, int port,
                            unsigned int freq)
{
        unsigned long reg = REG_GTH_SMCR0 + ((port / 2) * 4);
        int shift = (port & 1) * 16;
        u32 val;

        val = ioread32(gth->base + reg);
        val &= ~(0xffff << shift);
        val |= freq << shift;
        iowrite32(val, gth->base + reg);
}

static unsigned int gth_smcfreq_get(struct gth_device *gth, int port)
{
        unsigned long reg = REG_GTH_SMCR0 + ((port / 2) * 4);
        int shift = (port & 1) * 16;
        u32 val;

        val = ioread32(gth->base + reg);
        val &= 0xffff << shift;
        val >>= shift;

        return val;
}
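
/*
 * "masters" attribute group: one sysfs attribute per trace master, used to
 * route that master to an output port (or report it as "disabled").
 */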
struct master_attribute {
        struct device_attribute attr;
        struct gth_device *gth;
        unsigned int master;
};

static void
gth_master_set(struct gth_device *gth, unsigned int master, int port)
{
        unsigned int reg = REG_GTH_SWDEST0 + ((master >> 1) & ~3u);
        unsigned int shift = (master & 0x7) * 4;
        u32 val;

        if (master >= 256) {
                reg = REG_GTH_GSWTDEST;
                shift = 0;
        }

        val = ioread32(gth->base + reg);
        val &= ~(0xf << shift);
        if (port >= 0)
                val |= (0x8 | port) << shift;
        iowrite32(val, gth->base + reg);
}

static ssize_t master_attr_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        struct master_attribute *ma =
                container_of(attr, struct master_attribute, attr);
        struct gth_device *gth = ma->gth;
        size_t count;
        int port;

        spin_lock(&gth->gth_lock);
        port = gth->master[ma->master];
        spin_unlock(&gth->gth_lock);

        if (port >= 0)
                count = snprintf(buf, PAGE_SIZE, "%x\n", port);
        else
                count = snprintf(buf, PAGE_SIZE, "disabled\n");

        return count;
}

static ssize_t master_attr_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
{
        struct master_attribute *ma =
                container_of(attr, struct master_attribute, attr);
        struct gth_device *gth = ma->gth;
        int old_port, port;

        if (kstrtoint(buf, 10, &port) < 0)
                return -EINVAL;

        if (port >= TH_POSSIBLE_OUTPUTS || port < -1)
                return -EINVAL;

        spin_lock(&gth->gth_lock);

        /* disconnect from the previous output port, if there was one */
        old_port = gth->master[ma->master];
        if (old_port >= 0) {
                gth->master[ma->master] = -1;
                clear_bit(ma->master, gth->output[old_port].master);

                /*
                 * if the port is active, program this setting now;
                 * otherwise it will be programmed when the output
                 * becomes active
                 */
                if (gth->output[old_port].output->active)
                        gth_master_set(gth, ma->master, -1);
        }

        /* connect to the new output port, if any */
        if (port >= 0) {
                /* check if there's a driver for this port */
                if (!gth->output[port].output) {
                        count = -ENODEV;
                        goto unlock;
                }

                set_bit(ma->master, gth->output[port].master);

                /* if the port is active, program this setting */
                if (gth->output[port].output->active)
                        gth_master_set(gth, ma->master, port);
        }

        gth->master[ma->master] = port;

unlock:
        spin_unlock(&gth->gth_lock);

        return count;
}

struct output_attribute {
        struct device_attribute attr;
        struct gth_device *gth;
        unsigned int port;
        unsigned int parm;
};

#define OUTPUT_PARM(_name, _mask, _r, _w, _what)                       \
        [TH_OUTPUT_PARM(_name)] = { .name = __stringify(_name),        \
                                    .get = gth_ ## _what ## _get,      \
                                    .set = gth_ ## _what ## _set,      \
                                    .mask = (_mask),                   \
                                    .readable = (_r),                  \
                                    .writable = (_w) }

static const struct output_parm {
        const char *name;
        unsigned int (*get)(struct gth_device *gth, int port);
        void (*set)(struct gth_device *gth, int port,
                    unsigned int val);
        unsigned int mask;
        unsigned int readable : 1,
                     writable : 1;
} output_parms[] = {
        OUTPUT_PARM(port,    0x7,    1, 0, output),
        OUTPUT_PARM(null,    BIT(3), 1, 1, output),
        OUTPUT_PARM(drop,    BIT(4), 1, 1, output),
        OUTPUT_PARM(reset,   BIT(5), 1, 0, output),
        OUTPUT_PARM(flush,   BIT(7), 0, 1, output),
        OUTPUT_PARM(smcfreq, 0xffff, 1, 1, smcfreq),
};

static void
gth_output_parm_set(struct gth_device *gth, int port, unsigned int parm,
                    unsigned int val)
{
        unsigned int config = output_parms[parm].get(gth, port);
        unsigned int mask = output_parms[parm].mask;
        unsigned int shift = __ffs(mask);

        config &= ~mask;
        config |= (val << shift) & mask;
        output_parms[parm].set(gth, port, config);
}

static unsigned int
gth_output_parm_get(struct gth_device *gth, int port, unsigned int parm)
{
        unsigned int config = output_parms[parm].get(gth, port);
        unsigned int mask = output_parms[parm].mask;
        unsigned int shift = __ffs(mask);

        config &= mask;
        config >>= shift;

        return config;
}
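
/*
 * intel_th_gth_reset() - put the routing and output configuration into a
 * known default state; returns -EBUSY if an external debugger owns the
 * hardware (SCRPD_DEBUGGER_IN_USE is set in the scratchpad register).
 */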
static int intel_th_gth_reset(struct gth_device *gth)
{
        u32 reg;
        int port, i;

        reg = ioread32(gth->base + REG_GTH_SCRPD0);
        if (reg & SCRPD_DEBUGGER_IN_USE)
                return -EBUSY;

        /* mark STH and the trigger unit as enabled in the scratchpad */
        reg |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED;
        iowrite32(reg, gth->base + REG_GTH_SCRPD0);

        /* output ports */
        for (port = 0; port < 8; port++) {
                if (gth_output_parm_get(gth, port, TH_OUTPUT_PARM(port)) ==
                    GTH_NONE)
                        continue;

                gth_output_set(gth, port, 0);
                gth_smcfreq_set(gth, port, 16);
        }

        /* disable destination overrides */
        iowrite32(0, gth->base + REG_GTH_DESTOVR);

        /* clear the 32 SWDEST registers plus GSWTDEST */
        for (i = 0; i < 33; i++)
                iowrite32(0, gth->base + REG_GTH_SWDEST0 + i * 4);

        /* stop capture on all output ports */
        iowrite32(0, gth->base + REG_GTH_SCR);
        iowrite32(0xfc, gth->base + REG_GTH_SCR2);

        /* CTS: trigger on any event, then return to the idle state */
        iowrite32(CTS_EVENT_ENABLE_IF_ANYTHING, gth->base + REG_CTS_C0S0_EN);
        iowrite32(CTS_ACTION_CONTROL_SET_STATE(CTS_STATE_IDLE) |
                  CTS_ACTION_CONTROL_TRIGGER, gth->base + REG_CTS_C0S0_ACT);

        return 0;
}
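
/*
 * "outputs" attribute group: show/store handlers that read and write the
 * per-port parameters described in output_parms[], with runtime PM held
 * around the register access.
 */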
static ssize_t output_attr_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        struct output_attribute *oa =
                container_of(attr, struct output_attribute, attr);
        struct gth_device *gth = oa->gth;
        size_t count;

        pm_runtime_get_sync(dev);

        spin_lock(&gth->gth_lock);
        count = snprintf(buf, PAGE_SIZE, "%x\n",
                         gth_output_parm_get(gth, oa->port, oa->parm));
        spin_unlock(&gth->gth_lock);

        pm_runtime_put(dev);

        return count;
}

static ssize_t output_attr_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
{
        struct output_attribute *oa =
                container_of(attr, struct output_attribute, attr);
        struct gth_device *gth = oa->gth;
        unsigned int config;

        if (kstrtouint(buf, 16, &config) < 0)
                return -EINVAL;

        pm_runtime_get_sync(dev);

        spin_lock(&gth->gth_lock);
        gth_output_parm_set(gth, oa->port, oa->parm, config);
        spin_unlock(&gth->gth_lock);

        pm_runtime_put(dev);

        return count;
}

static int intel_th_master_attributes(struct gth_device *gth)
{
        struct master_attribute *master_attrs;
        struct attribute **attrs;
        int i, nattrs = TH_CONFIGURABLE_MASTERS + 2;

        attrs = devm_kcalloc(gth->dev, nattrs, sizeof(void *), GFP_KERNEL);
        if (!attrs)
                return -ENOMEM;

        master_attrs = devm_kcalloc(gth->dev, nattrs,
                                    sizeof(struct master_attribute),
                                    GFP_KERNEL);
        if (!master_attrs)
                return -ENOMEM;

        for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++) {
                char *name;

                name = devm_kasprintf(gth->dev, GFP_KERNEL, "%d%s", i,
                                      i == TH_CONFIGURABLE_MASTERS ? "+" : "");
                if (!name)
                        return -ENOMEM;

                master_attrs[i].attr.attr.name = name;
                master_attrs[i].attr.attr.mode = S_IRUGO | S_IWUSR;
                master_attrs[i].attr.show = master_attr_show;
                master_attrs[i].attr.store = master_attr_store;

                sysfs_attr_init(&master_attrs[i].attr.attr);
                attrs[i] = &master_attrs[i].attr.attr;

                master_attrs[i].gth = gth;
                master_attrs[i].master = i;
        }

        gth->master_group.name = "masters";
        gth->master_group.attrs = attrs;

        return sysfs_create_group(&gth->dev->kobj, &gth->master_group);
}

static int intel_th_output_attributes(struct gth_device *gth)
{
        struct output_attribute *out_attrs;
        struct attribute **attrs;
        int i, j, nouts = TH_POSSIBLE_OUTPUTS;
        int nparms = ARRAY_SIZE(output_parms);
        int nattrs = nouts * nparms + 1;

        attrs = devm_kcalloc(gth->dev, nattrs, sizeof(void *), GFP_KERNEL);
        if (!attrs)
                return -ENOMEM;

        out_attrs = devm_kcalloc(gth->dev, nattrs,
                                 sizeof(struct output_attribute),
                                 GFP_KERNEL);
        if (!out_attrs)
                return -ENOMEM;

        for (i = 0; i < nouts; i++) {
                for (j = 0; j < nparms; j++) {
                        unsigned int idx = i * nparms + j;
                        char *name;

                        name = devm_kasprintf(gth->dev, GFP_KERNEL, "%d_%s", i,
                                              output_parms[j].name);
                        if (!name)
                                return -ENOMEM;

                        out_attrs[idx].attr.attr.name = name;

                        if (output_parms[j].readable) {
                                out_attrs[idx].attr.attr.mode |= S_IRUGO;
                                out_attrs[idx].attr.show = output_attr_show;
                        }

                        if (output_parms[j].writable) {
                                out_attrs[idx].attr.attr.mode |= S_IWUSR;
                                out_attrs[idx].attr.store = output_attr_store;
                        }

                        sysfs_attr_init(&out_attrs[idx].attr.attr);
                        attrs[idx] = &out_attrs[idx].attr.attr;

                        out_attrs[idx].gth = gth;
                        out_attrs[idx].port = i;
                        out_attrs[idx].parm = j;
                }
        }

        gth->output_group.name = "outputs";
        gth->output_group.attrs = attrs;

        return sysfs_create_group(&gth->dev->kobj, &gth->output_group);
}
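
/**
 * intel_th_gth_stop() - stop tracing to an output device
 * @gth:		GTH device
 * @output:		output device's descriptor
 * @capture_done:	set when no more traces will be captured
 *
 * This stops capture via the store enable controls, waits for the
 * "pipeline empty" status bit of the corresponding output port and then
 * lets the output driver drain via its ->wait_empty() callback, if any.
 */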
static void intel_th_gth_stop(struct gth_device *gth,
                              struct intel_th_output *output,
                              bool capture_done)
{
        struct intel_th_device *outdev =
                container_of(output, struct intel_th_device, output);
        struct intel_th_driver *outdrv =
                to_intel_th_driver(outdev->dev.driver);
        unsigned long count;
        u32 reg;
        u32 scr2 = 0xfc | (capture_done ? 1 : 0);

        iowrite32(0, gth->base + REG_GTH_SCR);
        iowrite32(scr2, gth->base + REG_GTH_SCR2);

        /* wait on pipeline empty for the given port */
        for (reg = 0, count = GTH_PLE_WAITLOOP_DEPTH;
             count && !(reg & BIT(output->port)); count--) {
                reg = ioread32(gth->base + REG_GTH_STAT);
                cpu_relax();
        }

        if (!count)
                dev_dbg(gth->dev, "timeout waiting for GTH[%d] PLE\n",
                        output->port);

        /* wait on the output device's own pipeline, if it knows how */
        if (outdrv->wait_empty)
                outdrv->wait_empty(outdev);

        /* clear the capture-done bit for the next capture */
        iowrite32(0xfc, gth->base + REG_GTH_SCR2);
}
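
/**
 * intel_th_gth_start() - start tracing to an output device
 * @gth:	GTH device
 * @output:	output device's descriptor
 *
 * This re-enables capture by programming the store enable controls; the
 * value written depends on whether the output operates in multiblock mode.
 */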
static void intel_th_gth_start(struct gth_device *gth,
                               struct intel_th_output *output)
{
        u32 scr = 0xfc0000;

        if (output->multiblock)
                scr |= 0xff;

        iowrite32(scr, gth->base + REG_GTH_SCR);
        iowrite32(0, gth->base + REG_GTH_SCR2);
}
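
/**
 * intel_th_gth_disable() - disable tracing to an output device
 * @thdev:	GTH device
 * @output:	output device's descriptor
 *
 * This deconfigures all masters set to output to this device, stops the
 * capture and clears this output's bits in the scratchpad register.
 */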
static void intel_th_gth_disable(struct intel_th_device *thdev,
                                 struct intel_th_output *output)
{
        struct gth_device *gth = dev_get_drvdata(&thdev->dev);
        int master;
        u32 reg;

        spin_lock(&gth->gth_lock);
        output->active = false;

        for_each_set_bit(master, gth->output[output->port].master,
                         TH_CONFIGURABLE_MASTERS + 1) {
                gth_master_set(gth, master, -1);
        }
        spin_unlock(&gth->gth_lock);

        intel_th_gth_stop(gth, output, true);

        reg = ioread32(gth->base + REG_GTH_SCRPD0);
        reg &= ~output->scratchpad;
        iowrite32(reg, gth->base + REG_GTH_SCRPD0);
}
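
/*
 * gth_tscu_resync() - clear TSUCTRL_CTCRESYNC in the TSCU control register;
 * called before enabling an output on hardware with the tscu_enable
 * capability.
 */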
static void gth_tscu_resync(struct gth_device *gth)
{
        u32 reg;

        reg = ioread32(gth->base + REG_TSCU_TSUCTRL);
        reg &= ~TSUCTRL_CTCRESYNC;
        iowrite32(reg, gth->base + REG_TSCU_TSUCTRL);
}
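
/**
 * intel_th_gth_enable() - enable tracing to an output device
 * @thdev:	GTH device
 * @output:	output device's descriptor
 *
 * This configures all masters set to output to this device, marks the
 * output in the scratchpad register and starts the capture.
 */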
static void intel_th_gth_enable(struct intel_th_device *thdev,
                                struct intel_th_output *output)
{
        struct gth_device *gth = dev_get_drvdata(&thdev->dev);
        struct intel_th *th = to_intel_th(thdev);
        int master;
        u32 scrpd;

        spin_lock(&gth->gth_lock);
        for_each_set_bit(master, gth->output[output->port].master,
                         TH_CONFIGURABLE_MASTERS + 1) {
                gth_master_set(gth, master, output->port);
        }

        output->active = true;
        spin_unlock(&gth->gth_lock);

        if (INTEL_TH_CAP(th, tscu_enable))
                gth_tscu_resync(gth);

        scrpd = ioread32(gth->base + REG_GTH_SCRPD0);
        scrpd |= output->scratchpad;
        iowrite32(scrpd, gth->base + REG_GTH_SCRPD0);

        intel_th_gth_start(gth, output);
}
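
/**
 * intel_th_gth_switch() - execute a window switch sequence
 * @thdev:	GTH device
 * @output:	output device's descriptor
 *
 * This kicks the CTS to generate a switch trigger, waits for it to fire,
 * then stops and restarts the capture on the given output port.
 */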
static void intel_th_gth_switch(struct intel_th_device *thdev,
                                struct intel_th_output *output)
{
        struct gth_device *gth = dev_get_drvdata(&thdev->dev);
        unsigned long count;
        u32 reg;

        /* trigger the switch via the CTS sequencer */
        iowrite32(0, gth->base + REG_CTS_CTL);
        iowrite32(CTS_CTL_SEQUENCER_ENABLE, gth->base + REG_CTS_CTL);

        /* wait on trigger status */
        for (reg = 0, count = CTS_TRIG_WAITLOOP_DEPTH;
             count && !(reg & BIT(4)); count--) {
                reg = ioread32(gth->base + REG_CTS_STAT);
                cpu_relax();
        }
        if (!count)
                dev_dbg(&thdev->dev, "timeout waiting for CTS Trigger\n");

        intel_th_gth_stop(gth, output, false);
        intel_th_gth_start(gth, output);
}
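
/**
 * intel_th_gth_assign() - assign an output device to an output port
 * @thdev:	GTH device
 * @othdev:	output device
 *
 * This matches a given output device against the available output ports on
 * the GTH and fills out the relevant bits in the output device's descriptor.
 *
 * Return: 0 on success, -errno otherwise.
 */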
static int intel_th_gth_assign(struct intel_th_device *thdev,
                               struct intel_th_device *othdev)
{
        struct gth_device *gth = dev_get_drvdata(&thdev->dev);
        int i, id;

        if (thdev->host_mode)
                return -EBUSY;

        if (othdev->type != INTEL_TH_OUTPUT)
                return -EINVAL;

        for (i = 0, id = 0; i < TH_POSSIBLE_OUTPUTS; i++) {
                if (gth->output[i].port_type != othdev->output.type)
                        continue;

                if (othdev->id == -1 || othdev->id == id)
                        goto found;

                id++;
        }

        return -ENOENT;

found:
        spin_lock(&gth->gth_lock);
        othdev->output.port = i;
        othdev->output.active = false;
        gth->output[i].output = &othdev->output;
        spin_unlock(&gth->gth_lock);

        return 0;
}
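
/**
 * intel_th_gth_unassign() - deassociate an output device from its output port
 * @thdev:	GTH device
 * @othdev:	output device
 *
 * This also drops any master assignments that pointed at the port being
 * released.
 */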
static void intel_th_gth_unassign(struct intel_th_device *thdev,
                                  struct intel_th_device *othdev)
{
        struct gth_device *gth = dev_get_drvdata(&thdev->dev);
        int port = othdev->output.port;
        int master;

        if (thdev->host_mode)
                return;

        spin_lock(&gth->gth_lock);
        othdev->output.port = -1;
        othdev->output.active = false;
        gth->output[port].output = NULL;
        for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
                if (gth->master[master] == port)
                        gth->master[master] = -1;
        spin_unlock(&gth->gth_lock);
}

static int
intel_th_gth_set_output(struct intel_th_device *thdev, unsigned int master)
{
        struct gth_device *gth = dev_get_drvdata(&thdev->dev);
        int port = 0;

        /*
         * Masters above the configurable range are all routed through the
         * catch-all destination, represented by the last slot in the
         * master table.
         */
        if (master > TH_CONFIGURABLE_MASTERS)
                master = TH_CONFIGURABLE_MASTERS;

        spin_lock(&gth->gth_lock);
        if (gth->master[master] == -1) {
                set_bit(master, gth->output[port].master);
                gth->master[master] = port;
        }
        spin_unlock(&gth->gth_lock);

        return 0;
}
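
/*
 * intel_th_gth_probe() - map the GTH register window, reset the hardware
 * (unless an external debugger owns it) and create the sysfs configuration
 * groups for masters and output ports.
 */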
static int intel_th_gth_probe(struct intel_th_device *thdev)
{
        struct device *dev = &thdev->dev;
        struct intel_th *th = dev_get_drvdata(dev->parent);
        struct gth_device *gth;
        struct resource *res;
        void __iomem *base;
        int i, ret;

        res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        base = devm_ioremap(dev, res->start, resource_size(res));
        if (!base)
                return -ENOMEM;

        gth = devm_kzalloc(dev, sizeof(*gth), GFP_KERNEL);
        if (!gth)
                return -ENOMEM;

        gth->dev = dev;
        gth->base = base;
        spin_lock_init(&gth->gth_lock);

        dev_set_drvdata(dev, gth);

        /*
         * In host mode, an external debugger owns the trace configuration:
         * don't touch the hardware and don't create any configuration
         * attributes. The same applies when the reset below finds
         * SCRPD_DEBUGGER_IN_USE set, in which case host mode is entered here.
         */
        if (thdev->host_mode)
                return 0;

        ret = intel_th_gth_reset(gth);
        if (ret) {
                if (ret != -EBUSY)
                        return ret;

                thdev->host_mode = true;

                return 0;
        }

        for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++)
                gth->master[i] = -1;

        for (i = 0; i < TH_POSSIBLE_OUTPUTS; i++) {
                gth->output[i].gth = gth;
                gth->output[i].index = i;
                gth->output[i].port_type =
                        gth_output_parm_get(gth, i, TH_OUTPUT_PARM(port));
                if (gth->output[i].port_type == GTH_NONE)
                        continue;

                ret = intel_th_output_enable(th, gth->output[i].port_type);
                /* -ENODEV is ok, we just won't have that device enumerated */
                if (ret && ret != -ENODEV)
                        return ret;
        }

        if (intel_th_output_attributes(gth) ||
            intel_th_master_attributes(gth)) {
                pr_warn("Can't initialize sysfs attributes\n");

                if (gth->output_group.attrs)
                        sysfs_remove_group(&gth->dev->kobj, &gth->output_group);
                return -ENOMEM;
        }

        return 0;
}

static void intel_th_gth_remove(struct intel_th_device *thdev)
{
        struct gth_device *gth = dev_get_drvdata(&thdev->dev);

        sysfs_remove_group(&gth->dev->kobj, &gth->output_group);
        sysfs_remove_group(&gth->dev->kobj, &gth->master_group);
}

static struct intel_th_driver intel_th_gth_driver = {
        .probe       = intel_th_gth_probe,
        .remove      = intel_th_gth_remove,
        .assign      = intel_th_gth_assign,
        .unassign    = intel_th_gth_unassign,
        .set_output  = intel_th_gth_set_output,
        .enable      = intel_th_gth_enable,
        .trig_switch = intel_th_gth_switch,
        .disable     = intel_th_gth_disable,
        .driver = {
                .name  = "gth",
                .owner = THIS_MODULE,
        },
};

module_driver(intel_th_gth_driver,
              intel_th_driver_register,
              intel_th_driver_unregister);

MODULE_ALIAS("intel_th_switch");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel(R) Trace Hub Global Trace Hub driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");