/*
 * Moorestown/Medfield platform (Langwell/Penwell) GPIO driver
 *
 * Supports:
 * - Moorestown platform Langwell chip (PCI, 64 GPIOs)
 * - Medfield platform Penwell chip (PCI, 96 GPIOs)
 * - Whitney Point platform GPIO block (platform device)
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/irqdomain.h>

/*
 * Langwell has 64 pins and Penwell has 96, so each register "feature"
 * (level, direction, set, clear, ...) is made up of ngpio / 32 consecutive
 * 32-bit registers.  The enum below only fixes the order of the features;
 * the address of the register controlling a given pin is computed as
 * (using GPDR as an example):
 *
 *      nreg     = ngpio / 32;
 *      reg      = offset / 32;
 *      bit      = offset % 32;
 *      reg_addr = reg_base + GPDR * nreg * 4 + reg * 4;
 *
 * and bit "bit" of reg_addr then controls pin "offset"'s direction.
 * GAFR is the exception: it uses two bits per pin, so 16 pins per register.
 */
enum GPIO_REG {
        GPLR = 0,       /* pin level, read-only */
        GPDR,           /* pin direction */
        GPSR,           /* pin output set */
        GPCR,           /* pin output clear */
        GRER,           /* rising edge detect enable */
        GFER,           /* falling edge detect enable */
        GEDR,           /* edge detect status */
        GAFR,           /* alternate function, 2 bits per pin */
};

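/*
 * Per-controller state.  @pdev is only set for the PCI (Langwell/Penwell)
 * variants; the Whitney Point platform variant leaves it NULL, which is why
 * the runtime-PM calls below are guarded with "if (lnw->pdev)".
 */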
struct lnw_gpio {
        struct gpio_chip        chip;
        void __iomem            *reg_base;
        spinlock_t              lock;
        struct pci_dev          *pdev;
        struct irq_domain       *domain;
};

static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
                              enum GPIO_REG reg_type)
{
        struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
        unsigned nreg = chip->ngpio / 32;
        u8 reg = offset / 32;

        return lnw->reg_base + reg_type * nreg * 4 + reg * 4;
}

static void __iomem *gpio_reg_2bit(struct gpio_chip *chip, unsigned offset,
                                   enum GPIO_REG reg_type)
{
        struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
        unsigned nreg = chip->ngpio / 32;
        u8 reg = offset / 16;

        return lnw->reg_base + reg_type * nreg * 4 + reg * 4;
}

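/*
 * Claiming a pin as GPIO: if the 2-bit alternate-function field in GAFR
 * selects anything other than plain GPIO (af != 0), switch the pin back
 * to GPIO mode by clearing the field.
 */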
static int lnw_gpio_request(struct gpio_chip *chip, unsigned offset)
{
        void __iomem *gafr = gpio_reg_2bit(chip, offset, GAFR);
        u32 value = readl(gafr);
        int shift = (offset % 16) << 1, af = (value >> shift) & 3;

        if (af) {
                value &= ~(3 << shift);
                writel(value, gafr);
        }
        return 0;
}

static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset)
{
        void __iomem *gplr = gpio_reg(chip, offset, GPLR);

        return readl(gplr) & BIT(offset % 32);
}

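/*
 * Output levels are driven through the write-to-set (GPSR) and
 * write-to-clear (GPCR) registers, so no read-modify-write and hence no
 * locking is needed here.
 */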
static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
        void __iomem *gpsr, *gpcr;

        if (value) {
                gpsr = gpio_reg(chip, offset, GPSR);
                writel(BIT(offset % 32), gpsr);
        } else {
                gpcr = gpio_reg(chip, offset, GPCR);
                writel(BIT(offset % 32), gpcr);
        }
}

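/*
 * Direction changes are a read-modify-write of GPDR, so they take the
 * spinlock, and the register block is kept awake with a runtime-PM
 * get/put around the access (PCI variants only).
 */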
static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
        struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
        void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
        u32 value;
        unsigned long flags;

        if (lnw->pdev)
                pm_runtime_get(&lnw->pdev->dev);

        spin_lock_irqsave(&lnw->lock, flags);
        value = readl(gpdr);
        value &= ~BIT(offset % 32);
        writel(value, gpdr);
        spin_unlock_irqrestore(&lnw->lock, flags);

        if (lnw->pdev)
                pm_runtime_put(&lnw->pdev->dev);

        return 0;
}

static int lnw_gpio_direction_output(struct gpio_chip *chip,
                                     unsigned offset, int value)
{
        struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
        void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
        unsigned long flags;

        lnw_gpio_set(chip, offset, value);

        if (lnw->pdev)
                pm_runtime_get(&lnw->pdev->dev);

        spin_lock_irqsave(&lnw->lock, flags);
        value = readl(gpdr);
        value |= BIT(offset % 32);
        writel(value, gpdr);
        spin_unlock_irqrestore(&lnw->lock, flags);

        if (lnw->pdev)
                pm_runtime_put(&lnw->pdev->dev);

        return 0;
}

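/*
 * Map a GPIO offset to a Linux IRQ number through the controller's
 * linear irqdomain.
 */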
static int lnw_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
        struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);

        return irq_create_mapping(lnw->domain, offset);
}

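/*
 * Only edge triggers are supported: rising edges are enabled per pin in
 * GRER and falling edges in GFER.  Level trigger types are effectively
 * ignored here, since neither edge flag is set and both enables end up
 * cleared.
 */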
static int lnw_irq_type(struct irq_data *d, unsigned type)
{
        struct lnw_gpio *lnw = irq_data_get_irq_chip_data(d);
        u32 gpio = irqd_to_hwirq(d);
        unsigned long flags;
        u32 value;
        void __iomem *grer;
        void __iomem *gfer;

        if (gpio >= lnw->chip.ngpio)
                return -EINVAL;

        grer = gpio_reg(&lnw->chip, gpio, GRER);
        gfer = gpio_reg(&lnw->chip, gpio, GFER);

        if (lnw->pdev)
                pm_runtime_get(&lnw->pdev->dev);

        spin_lock_irqsave(&lnw->lock, flags);
        if (type & IRQ_TYPE_EDGE_RISING)
                value = readl(grer) | BIT(gpio % 32);
        else
                value = readl(grer) & (~BIT(gpio % 32));
        writel(value, grer);

        if (type & IRQ_TYPE_EDGE_FALLING)
                value = readl(gfer) | BIT(gpio % 32);
        else
                value = readl(gfer) & (~BIT(gpio % 32));
        writel(value, gfer);
        spin_unlock_irqrestore(&lnw->lock, flags);

        if (lnw->pdev)
                pm_runtime_put(&lnw->pdev->dev);

        return 0;
}

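/*
 * mask/unmask are left empty: interrupt delivery is controlled in this
 * driver solely through the GRER/GFER edge-detect enables programmed in
 * lnw_irq_type(), and pending edges are acked in the chained handler.
 */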
static void lnw_irq_unmask(struct irq_data *d)
{
}

static void lnw_irq_mask(struct irq_data *d)
{
}

static struct irq_chip lnw_irqchip = {
        .name           = "LNW-GPIO",
        .irq_mask       = lnw_irq_mask,
        .irq_unmask     = lnw_irq_unmask,
        .irq_set_type   = lnw_irq_type,
};

/* .driver_data is the number of GPIOs provided by the controller */
static DEFINE_PCI_DEVICE_TABLE(lnw_gpio_ids) = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f), .driver_data = 64 },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f), .driver_data = 96 },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a), .driver_data = 96 },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);

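/*
 * Chained handler for the parent PCI interrupt: walk every 32-pin bank,
 * ack and dispatch each pending edge through the irqdomain, then signal
 * EOI on the parent interrupt.
 */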
static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
{
        struct irq_data *data = irq_desc_get_irq_data(desc);
        struct lnw_gpio *lnw = irq_data_get_irq_handler_data(data);
        struct irq_chip *chip = irq_data_get_irq_chip(data);
        u32 base, gpio, mask;
        unsigned long pending;
        void __iomem *gedr;

        /* check each 32-pin bank for the pin(s) that triggered the interrupt */
        for (base = 0; base < lnw->chip.ngpio; base += 32) {
                gedr = gpio_reg(&lnw->chip, base, GEDR);
                while ((pending = readl(gedr))) {
                        gpio = __ffs(pending);
                        mask = BIT(gpio);
                        /* clear the edge status before handling it */
                        writel(mask, gedr);
                        generic_handle_irq(irq_find_mapping(lnw->domain,
                                                            base + gpio));
                }
        }

        chip->irq_eoi(data);
}

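/*
 * Quiesce the interrupt logic at probe time: disable rising/falling edge
 * detection on every bank and clear any stale edge status.
 */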
static void lnw_irq_init_hw(struct lnw_gpio *lnw)
{
        void __iomem *reg;
        unsigned base;

        for (base = 0; base < lnw->chip.ngpio; base += 32) {
                /* Clear the rising-edge detect register */
                reg = gpio_reg(&lnw->chip, base, GRER);
                writel(0, reg);
                /* Clear the falling-edge detect register */
                reg = gpio_reg(&lnw->chip, base, GFER);
                writel(0, reg);
                /* Clear the edge detect status register */
                reg = gpio_reg(&lnw->chip, base, GEDR);
                writel(~0, reg);
        }
}

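/*
 * irqdomain map callback: wire each freshly mapped virq to the GPIO
 * irq_chip with a simple-IRQ flow handler.
 */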
static int lnw_gpio_irq_map(struct irq_domain *d, unsigned int virq,
                            irq_hw_number_t hw)
{
        struct lnw_gpio *lnw = d->host_data;

        irq_set_chip_and_handler_name(virq, &lnw_irqchip, handle_simple_irq,
                                      "demux");
        irq_set_chip_data(virq, lnw);
        irq_set_irq_type(virq, IRQ_TYPE_NONE);

        return 0;
}

static const struct irq_domain_ops lnw_gpio_irq_ops = {
        .map = lnw_gpio_irq_map,
        .xlate = irq_domain_xlate_twocell,
};

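/*
 * Runtime PM: suspend/resume are no-ops here; the idle callback just
 * schedules an autosuspend 500 ms after the device goes idle.
 */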
#ifdef CONFIG_PM
static int lnw_gpio_runtime_resume(struct device *dev)
{
        return 0;
}

static int lnw_gpio_runtime_suspend(struct device *dev)
{
        return 0;
}

static int lnw_gpio_runtime_idle(struct device *dev)
{
        int err = pm_schedule_suspend(dev, 500);

        if (!err)
                return 0;

        return -EBUSY;
}

#else
#define lnw_gpio_runtime_suspend        NULL
#define lnw_gpio_runtime_resume         NULL
#define lnw_gpio_runtime_idle           NULL
#endif

static const struct dev_pm_ops lnw_gpio_pm_ops = {
        .runtime_suspend = lnw_gpio_runtime_suspend,
        .runtime_resume = lnw_gpio_runtime_resume,
        .runtime_idle = lnw_gpio_runtime_idle,
};

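/*
 * PCI probe: BAR1 carries the global GPIO number base for this controller,
 * BAR0 the register block.  id->driver_data gives the number of GPIOs
 * (64 on Langwell, 96 on Penwell).
 */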
static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
                                    const struct pci_device_id *id)
{
        void __iomem *base;
        resource_size_t start, len;
        struct lnw_gpio *lnw;
        u32 gpio_base;
        int retval;
        int ngpio = id->driver_data;

        retval = pci_enable_device(pdev);
        if (retval)
                return retval;

        retval = pci_request_regions(pdev, "langwell_gpio");
        if (retval) {
                dev_err(&pdev->dev, "error requesting resources\n");
                goto err2;
        }
        /* get the gpio_base from bar1 */
        start = pci_resource_start(pdev, 1);
        len = pci_resource_len(pdev, 1);
        base = ioremap_nocache(start, len);
        if (!base) {
                dev_err(&pdev->dev, "error mapping bar1\n");
                retval = -EFAULT;
                goto err3;
        }
        /* the second dword of bar1 holds the global GPIO number base */
        gpio_base = readl(base + sizeof(u32));
        /* release the mapping, we only needed the info from bar1 */
        iounmap(base);
        /* get the register base from bar0 */
        start = pci_resource_start(pdev, 0);
        len = pci_resource_len(pdev, 0);
        base = devm_ioremap_nocache(&pdev->dev, start, len);
        if (!base) {
                dev_err(&pdev->dev, "error mapping bar0\n");
                retval = -EFAULT;
                goto err3;
        }

        lnw = devm_kzalloc(&pdev->dev, sizeof(struct lnw_gpio), GFP_KERNEL);
        if (!lnw) {
                dev_err(&pdev->dev, "can't allocate langwell_gpio chip data\n");
                retval = -ENOMEM;
                goto err3;
        }

        lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio,
                                            &lnw_gpio_irq_ops, lnw);
        if (!lnw->domain) {
                retval = -ENOMEM;
                goto err3;
        }

        lnw->reg_base = base;
        lnw->chip.label = dev_name(&pdev->dev);
        lnw->chip.request = lnw_gpio_request;
        lnw->chip.direction_input = lnw_gpio_direction_input;
        lnw->chip.direction_output = lnw_gpio_direction_output;
        lnw->chip.get = lnw_gpio_get;
        lnw->chip.set = lnw_gpio_set;
        lnw->chip.to_irq = lnw_gpio_to_irq;
        lnw->chip.base = gpio_base;
        lnw->chip.ngpio = ngpio;
        lnw->chip.can_sleep = 0;
        lnw->pdev = pdev;
        spin_lock_init(&lnw->lock);
        pci_set_drvdata(pdev, lnw);
        retval = gpiochip_add(&lnw->chip);
        if (retval) {
                dev_err(&pdev->dev, "langwell gpiochip_add error %d\n", retval);
                goto err3;
        }

        lnw_irq_init_hw(lnw);

        irq_set_handler_data(pdev->irq, lnw);
        irq_set_chained_handler(pdev->irq, lnw_irq_handler);

        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_allow(&pdev->dev);

        return 0;

err3:
        pci_release_regions(pdev);
err2:
        pci_disable_device(pdev);
        return retval;
}

static struct pci_driver lnw_gpio_driver = {
        .name           = "langwell_gpio",
        .id_table       = lnw_gpio_ids,
        .probe          = lnw_gpio_probe,
        .driver         = {
                .pm     = &lnw_gpio_pm_ops,
        },
};

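/*
 * Whitney Point exposes the same register block as a plain platform device
 * (MMIO resource, no PCI, no interrupt demux), so it reuses the gpio_chip
 * callbacks above but registers no irqdomain and no to_irq hook.
 */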
static int __devinit wp_gpio_probe(struct platform_device *pdev)
{
        struct lnw_gpio *lnw;
        struct gpio_chip *gc;
        struct resource *rc;
        int retval = 0;

        rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!rc)
                return -EINVAL;

        lnw = kzalloc(sizeof(struct lnw_gpio), GFP_KERNEL);
        if (!lnw) {
                dev_err(&pdev->dev,
                        "can't allocate whitneypoint_gpio chip data\n");
                return -ENOMEM;
        }
        lnw->reg_base = ioremap_nocache(rc->start, resource_size(rc));
        if (lnw->reg_base == NULL) {
                retval = -EINVAL;
                goto err_kmalloc;
        }
        spin_lock_init(&lnw->lock);
        gc = &lnw->chip;
        gc->label = dev_name(&pdev->dev);
        gc->owner = THIS_MODULE;
        gc->direction_input = lnw_gpio_direction_input;
        gc->direction_output = lnw_gpio_direction_output;
        gc->get = lnw_gpio_get;
        gc->set = lnw_gpio_set;
        gc->to_irq = NULL;
        gc->base = 0;
        gc->ngpio = 64;
        gc->can_sleep = 0;
        retval = gpiochip_add(gc);
        if (retval) {
                dev_err(&pdev->dev, "whitneypoint gpiochip_add error %d\n",
                        retval);
                goto err_ioremap;
        }
        platform_set_drvdata(pdev, lnw);
        return 0;
err_ioremap:
        iounmap(lnw->reg_base);
err_kmalloc:
        kfree(lnw);
        return retval;
}

static int __devexit wp_gpio_remove(struct platform_device *pdev)
{
        struct lnw_gpio *lnw = platform_get_drvdata(pdev);
        int err;

        err = gpiochip_remove(&lnw->chip);
        if (err)
                dev_err(&pdev->dev, "failed to remove gpio_chip.\n");
        iounmap(lnw->reg_base);
        kfree(lnw);
        platform_set_drvdata(pdev, NULL);
        return 0;
}

static struct platform_driver wp_gpio_driver = {
        .probe          = wp_gpio_probe,
        .remove         = __devexit_p(wp_gpio_remove),
        .driver         = {
                .name   = "wp_gpio",
                .owner  = THIS_MODULE,
        },
};

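/*
 * Register both the PCI driver (Langwell/Penwell) and the Whitney Point
 * platform driver; unwind the PCI registration if the latter fails.
 */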
static int __init lnw_gpio_init(void)
{
        int ret;

        ret = pci_register_driver(&lnw_gpio_driver);
        if (ret < 0)
                return ret;
        ret = platform_driver_register(&wp_gpio_driver);
        if (ret < 0)
                pci_unregister_driver(&lnw_gpio_driver);
        return ret;
}

device_initcall(lnw_gpio_init);