/*
 * Intel(R) Trace Hub Global Trace Hub (GTH) driver
 *
 * Copyright (C) 2014-2015 Intel Corporation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/bitmap.h>

#include "intel_th.h"
#include "gth.h"

struct gth_device;

/**
 * struct gth_output - GTH view on an output port
 * @gth:	backlink to the GTH device
 * @output:	link to the output device's output descriptor
 * @index:	output port number
 * @port_type:	one of GTH_* port type values
 * @master:	bitmap of masters routed to this output port
 */
struct gth_output {
	struct gth_device	*gth;
	struct intel_th_output	*output;
	unsigned int		index;
	unsigned int		port_type;
	DECLARE_BITMAP(master, TH_CONFIGURABLE_MASTERS + 1);
};

/**
 * struct gth_device - GTH device
 * @dev:		driver core's device
 * @base:		register window base address
 * @output_group:	attributes describing the output ports
 * @master_group:	attributes describing the master assignments
 * @output:		output ports
 * @master:		master to output port assignments
 * @gth_lock:		serializes accesses to GTH configuration
 */
struct gth_device {
	struct device		*dev;
	void __iomem		*base;

	struct attribute_group	output_group;
	struct attribute_group	master_group;
	struct gth_output	output[TH_POSSIBLE_OUTPUTS];
	signed char		master[TH_CONFIGURABLE_MASTERS + 1];
	spinlock_t		gth_lock;
};

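/*
 * Program the 8-bit configuration field for @port: ports 0..3 live in
 * GTHOPT0, ports 4..7 in GTHOPT1, one byte per port.
 */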
static void gth_output_set(struct gth_device *gth, int port,
			   unsigned int config)
{
	unsigned long reg = port & 4 ? REG_GTH_GTHOPT1 : REG_GTH_GTHOPT0;
	u32 val;
	int shift = (port & 3) * 8;

	val = ioread32(gth->base + reg);
	val &= ~(0xff << shift);
	val |= config << shift;
	iowrite32(val, gth->base + reg);
}

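/* Read back the 8-bit configuration field for @port from GTHOPT0/GTHOPT1. */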
static unsigned int gth_output_get(struct gth_device *gth, int port)
{
	unsigned long reg = port & 4 ? REG_GTH_GTHOPT1 : REG_GTH_GTHOPT0;
	u32 val;
	int shift = (port & 3) * 8;

	val = ioread32(gth->base + reg);
	val &= 0xff << shift;
	val >>= shift;

	return val;
}

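/*
 * Program the 16-bit "smcfreq" value for @port; each SMCR register packs
 * the values for two ports, 16 bits each.
 */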
static void gth_smcfreq_set(struct gth_device *gth, int port,
			    unsigned int freq)
{
	unsigned long reg = REG_GTH_SMCR0 + ((port / 2) * 4);
	int shift = (port & 1) * 16;
	u32 val;

	val = ioread32(gth->base + reg);
	val &= ~(0xffff << shift);
	val |= freq << shift;
	iowrite32(val, gth->base + reg);
}

static unsigned int gth_smcfreq_get(struct gth_device *gth, int port)
{
	unsigned long reg = REG_GTH_SMCR0 + ((port / 2) * 4);
	int shift = (port & 1) * 16;
	u32 val;

	val = ioread32(gth->base + reg);
	val &= 0xffff << shift;
	val >>= shift;

	return val;
}

/*
 * Per-master "masters" sysfs attribute: shows and sets the output port
 * that a given trace master is routed to.
 */
struct master_attribute {
	struct device_attribute	attr;
	struct gth_device	*gth;
	unsigned int		master;
};

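/*
 * Route @master to output @port, or to no port if @port is negative, by
 * programming the master's 4-bit SWDEST field: bit 3 enables the
 * destination, bits 0-2 select the port. Masters 256 and above share the
 * single GSWTDEST destination register.
 */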
static void
gth_master_set(struct gth_device *gth, unsigned int master, int port)
{
	unsigned int reg = REG_GTH_SWDEST0 + ((master >> 1) & ~3u);
	unsigned int shift = (master & 0x7) * 4;
	u32 val;

	if (master >= 256) {
		reg = REG_GTH_GSWTDEST;
		shift = 0;
	}

	val = ioread32(gth->base + reg);
	val &= ~(0xf << shift);
	if (port >= 0)
		val |= (0x8 | port) << shift;
	iowrite32(val, gth->base + reg);
}

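/* sysfs show: print the output port for this master, or "disabled". */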
static ssize_t master_attr_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct master_attribute *ma =
		container_of(attr, struct master_attribute, attr);
	struct gth_device *gth = ma->gth;
	size_t count;
	int port;

	spin_lock(&gth->gth_lock);
	port = gth->master[ma->master];
	spin_unlock(&gth->gth_lock);

	if (port >= 0)
		count = snprintf(buf, PAGE_SIZE, "%x\n", port);
	else
		count = snprintf(buf, PAGE_SIZE, "disabled\n");

	return count;
}

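/*
 * sysfs store: reroute this master to the requested output port (-1 to
 * disable), touching the hardware only if the affected port is active.
 */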
static ssize_t master_attr_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct master_attribute *ma =
		container_of(attr, struct master_attribute, attr);
	struct gth_device *gth = ma->gth;
	int old_port, port;

	if (kstrtoint(buf, 10, &port) < 0)
		return -EINVAL;

	if (port >= TH_POSSIBLE_OUTPUTS || port < -1)
		return -EINVAL;

	spin_lock(&gth->gth_lock);

	/* disconnect from the previous output port, if any */
	old_port = gth->master[ma->master];
	if (old_port >= 0) {
		gth->master[ma->master] = -1;
		clear_bit(ma->master, gth->output[old_port].master);
		if (gth->output[old_port].output->active)
			gth_master_set(gth, ma->master, -1);
	}

	/* connect to the new output port, if any */
	if (port >= 0) {
		/* check if there's a driver for this port */
		if (!gth->output[port].output) {
			count = -ENODEV;
			goto unlock;
		}

		set_bit(ma->master, gth->output[port].master);

		/* if the port is active, program this routing in hardware */
		if (gth->output[port].output->active)
			gth_master_set(gth, ma->master, port);
	}

	gth->master[ma->master] = port;

unlock:
	spin_unlock(&gth->gth_lock);

	return count;
}

struct output_attribute {
	struct device_attribute	attr;
	struct gth_device	*gth;
	unsigned int		port;
	unsigned int		parm;
};

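/*
 * Table of per-port output parameters exposed via sysfs: each entry names
 * a bitfield in the port configuration, its accessors and whether it is
 * readable and/or writable from userspace.
 */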
#define OUTPUT_PARM(_name, _mask, _r, _w, _what)			\
	[TH_OUTPUT_PARM(_name)] = { .name = __stringify(_name),	\
				    .get = gth_ ## _what ## _get,	\
				    .set = gth_ ## _what ## _set,	\
				    .mask = (_mask),			\
				    .readable = (_r),			\
				    .writable = (_w) }

static const struct output_parm {
	const char	*name;
	unsigned int	(*get)(struct gth_device *gth, int port);
	void		(*set)(struct gth_device *gth, int port,
			       unsigned int val);
	unsigned int	mask;
	unsigned int	readable : 1,
			writable : 1;
} output_parms[] = {
	OUTPUT_PARM(port,	0x7,	1,	0,	output),
	OUTPUT_PARM(null,	BIT(3),	1,	1,	output),
	OUTPUT_PARM(drop,	BIT(4),	1,	1,	output),
	OUTPUT_PARM(reset,	BIT(5),	1,	0,	output),
	OUTPUT_PARM(flush,	BIT(7),	0,	1,	output),
	OUTPUT_PARM(smcfreq,	0xffff,	1,	1,	smcfreq),
};

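/*
 * Helpers to read or modify one named parameter within a port's
 * configuration: mask out the field, shift the value into place and go
 * through the parameter's accessors.
 */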
static void
gth_output_parm_set(struct gth_device *gth, int port, unsigned int parm,
		    unsigned int val)
{
	unsigned int config = output_parms[parm].get(gth, port);
	unsigned int mask = output_parms[parm].mask;
	unsigned int shift = __ffs(mask);

	config &= ~mask;
	config |= (val << shift) & mask;
	output_parms[parm].set(gth, port, config);
}

static unsigned int
gth_output_parm_get(struct gth_device *gth, int port, unsigned int parm)
{
	unsigned int config = output_parms[parm].get(gth, port);
	unsigned int mask = output_parms[parm].mask;
	unsigned int shift = __ffs(mask);

	config &= mask;
	config >>= shift;
	return config;
}

/*
 * Reset the GTH routing configuration: clear the output port settings,
 * destination overrides and all master assignments, unless an external
 * debugger currently owns the Trace Hub.
 */
static int intel_th_gth_reset(struct gth_device *gth)
{
	u32 scratchpad;
	int port, i;

	scratchpad = ioread32(gth->base + REG_GTH_SCRPD0);
	if (scratchpad & SCRPD_DEBUGGER_IN_USE)
		return -EBUSY;

	/* mark the STH and the trigger unit as enabled in the scratchpad */
	scratchpad |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED;
	iowrite32(scratchpad, gth->base + REG_GTH_SCRPD0);

	/* output ports */
	for (port = 0; port < 8; port++) {
		if (gth_output_parm_get(gth, port, TH_OUTPUT_PARM(port)) ==
		    GTH_NONE)
			continue;

		gth_output_set(gth, port, 0);
		gth_smcfreq_set(gth, port, 16);
	}

	iowrite32(0, gth->base + REG_GTH_DESTOVR);

	/* masters: swdest_0..31 and gswdest */
	for (i = 0; i < 33; i++)
		iowrite32(0, gth->base + REG_GTH_SWDEST0 + i * 4);

	/* sources */
	iowrite32(0, gth->base + REG_GTH_SCR);
	iowrite32(0xfc, gth->base + REG_GTH_SCR2);

	return 0;
}

/*
 * Per-port "outputs" sysfs attributes: each output port exposes the
 * parameters from output_parms[] as individual files.
 */
static ssize_t output_attr_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct output_attribute *oa =
		container_of(attr, struct output_attribute, attr);
	struct gth_device *gth = oa->gth;
	size_t count;

	spin_lock(&gth->gth_lock);
	count = snprintf(buf, PAGE_SIZE, "%x\n",
			 gth_output_parm_get(gth, oa->port, oa->parm));
	spin_unlock(&gth->gth_lock);

	return count;
}

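/* sysfs store: parse a hex value and program the corresponding parameter. */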
static ssize_t output_attr_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct output_attribute *oa =
		container_of(attr, struct output_attribute, attr);
	struct gth_device *gth = oa->gth;
	unsigned int config;

	if (kstrtouint(buf, 16, &config) < 0)
		return -EINVAL;

	spin_lock(&gth->gth_lock);
	gth_output_parm_set(gth, oa->port, oa->parm, config);
	spin_unlock(&gth->gth_lock);

	return count;
}

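/*
 * Create the "masters" sysfs group: one attribute per configurable master,
 * plus a trailing "<N>+" entry that stands for all higher master IDs.
 */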
static int intel_th_master_attributes(struct gth_device *gth)
{
	struct master_attribute *master_attrs;
	struct attribute **attrs;
	int i, nattrs = TH_CONFIGURABLE_MASTERS + 2;

	attrs = devm_kcalloc(gth->dev, nattrs, sizeof(void *), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	master_attrs = devm_kcalloc(gth->dev, nattrs,
				    sizeof(struct master_attribute),
				    GFP_KERNEL);
	if (!master_attrs)
		return -ENOMEM;

	for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++) {
		char *name;

		name = devm_kasprintf(gth->dev, GFP_KERNEL, "%d%s", i,
				      i == TH_CONFIGURABLE_MASTERS ? "+" : "");
		if (!name)
			return -ENOMEM;

		master_attrs[i].attr.attr.name = name;
		master_attrs[i].attr.attr.mode = S_IRUGO | S_IWUSR;
		master_attrs[i].attr.show = master_attr_show;
		master_attrs[i].attr.store = master_attr_store;

		sysfs_attr_init(&master_attrs[i].attr.attr);
		attrs[i] = &master_attrs[i].attr.attr;

		master_attrs[i].gth = gth;
		master_attrs[i].master = i;
	}

	gth->master_group.name = "masters";
	gth->master_group.attrs = attrs;

	return sysfs_create_group(&gth->dev->kobj, &gth->master_group);
}

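/*
 * Create the "outputs" sysfs group: one attribute per (port, parameter)
 * pair, named "<port>_<parameter>", with modes taken from output_parms[].
 */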
static int intel_th_output_attributes(struct gth_device *gth)
{
	struct output_attribute *out_attrs;
	struct attribute **attrs;
	int i, j, nouts = TH_POSSIBLE_OUTPUTS;
	int nparms = ARRAY_SIZE(output_parms);
	int nattrs = nouts * nparms + 1;

	attrs = devm_kcalloc(gth->dev, nattrs, sizeof(void *), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	out_attrs = devm_kcalloc(gth->dev, nattrs,
				 sizeof(struct output_attribute),
				 GFP_KERNEL);
	if (!out_attrs)
		return -ENOMEM;

	for (i = 0; i < nouts; i++) {
		for (j = 0; j < nparms; j++) {
			unsigned int idx = i * nparms + j;
			char *name;

			name = devm_kasprintf(gth->dev, GFP_KERNEL, "%d_%s", i,
					      output_parms[j].name);
			if (!name)
				return -ENOMEM;

			out_attrs[idx].attr.attr.name = name;

			if (output_parms[j].readable) {
				out_attrs[idx].attr.attr.mode |= S_IRUGO;
				out_attrs[idx].attr.show = output_attr_show;
			}

			if (output_parms[j].writable) {
				out_attrs[idx].attr.attr.mode |= S_IWUSR;
				out_attrs[idx].attr.store = output_attr_store;
			}

			sysfs_attr_init(&out_attrs[idx].attr.attr);
			attrs[idx] = &out_attrs[idx].attr.attr;

			out_attrs[idx].gth = gth;
			out_attrs[idx].port = i;
			out_attrs[idx].parm = j;
		}
	}

	gth->output_group.name = "outputs";
	gth->output_group.attrs = attrs;

	return sysfs_create_group(&gth->dev->kobj, &gth->output_group);
}

/**
 * intel_th_gth_disable() - disable tracing to an output device
 * @thdev:	GTH device
 * @output:	output device's descriptor
 *
 * This deconfigures all masters set to output to this device, disables
 * tracing using the force storeEn off signal and waits for the "pipeline
 * empty" bit of the corresponding output port.
 */
static void intel_th_gth_disable(struct intel_th_device *thdev,
				 struct intel_th_output *output)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
	unsigned long count;
	int master;
	u32 reg;

	spin_lock(&gth->gth_lock);
	output->active = false;

	for_each_set_bit(master, gth->output[output->port].master,
			 TH_CONFIGURABLE_MASTERS) {
		gth_master_set(gth, master, -1);
	}
	spin_unlock(&gth->gth_lock);

	iowrite32(0, gth->base + REG_GTH_SCR);
	iowrite32(0xfd, gth->base + REG_GTH_SCR2);

	/* wait on pipeline empty for the given port */
	for (reg = 0, count = GTH_PLE_WAITLOOP_DEPTH;
	     count && !(reg & BIT(output->port)); count--) {
		reg = ioread32(gth->base + REG_GTH_STAT);
		cpu_relax();
	}

	/* clear force capture done for next captures */
	iowrite32(0xfc, gth->base + REG_GTH_SCR2);

	if (!count)
		dev_dbg(&thdev->dev, "timeout waiting for GTH[%d] PLE\n",
			output->port);

	reg = ioread32(gth->base + REG_GTH_SCRPD0);
	reg &= ~output->scratchpad;
	iowrite32(reg, gth->base + REG_GTH_SCRPD0);
}

/**
 * intel_th_gth_enable() - enable tracing to an output device
 * @thdev:	GTH device
 * @output:	output device's descriptor
 *
 * This configures all masters set to output to this device and enables
 * tracing using the force storeEn signal.
 */
static void intel_th_gth_enable(struct intel_th_device *thdev,
				struct intel_th_output *output)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
	u32 scr = 0xfc0000, scrpd;
	int master;

	spin_lock(&gth->gth_lock);
	for_each_set_bit(master, gth->output[output->port].master,
			 TH_CONFIGURABLE_MASTERS + 1) {
		gth_master_set(gth, master, output->port);
	}

	if (output->multiblock)
		scr |= 0xff;

	output->active = true;
	spin_unlock(&gth->gth_lock);

	scrpd = ioread32(gth->base + REG_GTH_SCRPD0);
	scrpd |= output->scratchpad;
	iowrite32(scrpd, gth->base + REG_GTH_SCRPD0);

	iowrite32(scr, gth->base + REG_GTH_SCR);
	iowrite32(0, gth->base + REG_GTH_SCR2);
}

/**
 * intel_th_gth_assign() - assign an output device to a GTH output port
 * @thdev:	GTH device
 * @othdev:	output device
 *
 * This matches the output device's parameters against the GTH's output
 * ports and, on success, records the port number in the output device's
 * descriptor.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int intel_th_gth_assign(struct intel_th_device *thdev,
			       struct intel_th_device *othdev)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
	int i, id;

	if (othdev->type != INTEL_TH_OUTPUT)
		return -EINVAL;

	for (i = 0, id = 0; i < TH_POSSIBLE_OUTPUTS; i++) {
		if (gth->output[i].port_type != othdev->output.type)
			continue;

		if (othdev->id == -1 || othdev->id == id)
			goto found;

		id++;
	}

	return -ENOENT;

found:
	spin_lock(&gth->gth_lock);
	othdev->output.port = i;
	othdev->output.active = false;
	gth->output[i].output = &othdev->output;
	spin_unlock(&gth->gth_lock);

	return 0;
}

/**
 * intel_th_gth_unassign() - deassociate an output device from its output port
 * @thdev:	GTH device
 * @othdev:	output device
 */
static void intel_th_gth_unassign(struct intel_th_device *thdev,
				  struct intel_th_device *othdev)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
	int port = othdev->output.port;

	spin_lock(&gth->gth_lock);
	othdev->output.port = -1;
	othdev->output.active = false;
	gth->output[port].output = NULL;
	spin_unlock(&gth->gth_lock);
}

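/*
 * Set up a default route for @master: masters above TH_CONFIGURABLE_MASTERS
 * collapse onto the catch-all entry, and a master that has not yet been
 * assigned elsewhere is routed to output port 0.
 */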
static int
intel_th_gth_set_output(struct intel_th_device *thdev, unsigned int master)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
	int port = 0;

	/*
	 * everything above TH_CONFIGURABLE_MASTERS is controlled by the
	 * same register
	 */
	if (master > TH_CONFIGURABLE_MASTERS)
		master = TH_CONFIGURABLE_MASTERS;

	spin_lock(&gth->gth_lock);
	if (gth->master[master] == -1) {
		set_bit(master, gth->output[port].master);
		gth->master[master] = port;
	}
	spin_unlock(&gth->gth_lock);

	return 0;
}

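/*
 * Probe: map the GTH register window, reset the routing configuration,
 * discover the available output ports and create the sysfs groups.
 */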
static int intel_th_gth_probe(struct intel_th_device *thdev)
{
	struct device *dev = &thdev->dev;
	struct gth_device *gth;
	struct resource *res;
	void __iomem *base;
	int i, ret;

	res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	gth = devm_kzalloc(dev, sizeof(*gth), GFP_KERNEL);
	if (!gth)
		return -ENOMEM;

	gth->dev = dev;
	gth->base = base;
	spin_lock_init(&gth->gth_lock);

	ret = intel_th_gth_reset(gth);
	if (ret)
		return ret;

	for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++)
		gth->master[i] = -1;

	for (i = 0; i < TH_POSSIBLE_OUTPUTS; i++) {
		gth->output[i].gth = gth;
		gth->output[i].index = i;
		gth->output[i].port_type =
			gth_output_parm_get(gth, i, TH_OUTPUT_PARM(port));
	}

	if (intel_th_output_attributes(gth) ||
	    intel_th_master_attributes(gth)) {
		pr_warn("Can't initialize sysfs attributes\n");

		if (gth->output_group.attrs)
			sysfs_remove_group(&gth->dev->kobj, &gth->output_group);
		return -ENOMEM;
	}

	dev_set_drvdata(dev, gth);

	return 0;
}

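/* Remove: tear down the sysfs groups created at probe time. */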
static void intel_th_gth_remove(struct intel_th_device *thdev)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);

	sysfs_remove_group(&gth->dev->kobj, &gth->output_group);
	sysfs_remove_group(&gth->dev->kobj, &gth->master_group);
}

static struct intel_th_driver intel_th_gth_driver = {
	.probe		= intel_th_gth_probe,
	.remove		= intel_th_gth_remove,
	.assign		= intel_th_gth_assign,
	.unassign	= intel_th_gth_unassign,
	.set_output	= intel_th_gth_set_output,
	.enable		= intel_th_gth_enable,
	.disable	= intel_th_gth_disable,
	.driver	= {
		.name	= "gth",
		.owner	= THIS_MODULE,
	},
};

module_driver(intel_th_gth_driver,
	      intel_th_driver_register,
	      intel_th_driver_unregister);

MODULE_ALIAS("intel_th_switch");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel(R) Trace Hub Global Trace Hub driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");