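/*
 * GPIO Greybus driver.
 */
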
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/mutex.h>

#include "greybus.h"
#include "gbphy.h"

struct gb_gpio_line {
	/* Cached per-line state, updated as operations complete */
	u8 active: 1,
	   direction: 1,	/* 0 = output, 1 = input */
	   value: 1;		/* 0 = low, 1 = high */
	u16 debounce_usec;

	u8 irq_type;
	bool irq_type_pending;
	bool masked;
	bool masked_pending;
};

struct gb_gpio_controller {
	struct gbphy_device *gbphy_dev;
	struct gb_connection *connection;
	u8 line_max;		/* max line number; lines array has line_max + 1 entries */
	struct gb_gpio_line *lines;

	struct gpio_chip chip;
	struct irq_chip irqc;
	struct irq_chip *irqchip;
	struct irq_domain *irqdomain;
	unsigned int irq_base;
	irq_flow_handler_t irq_handler;
	unsigned int irq_default_type;
	struct mutex irq_lock;
};

#define gpio_chip_to_gb_gpio_controller(chip) \
	container_of(chip, struct gb_gpio_controller, chip)
#define irq_data_to_gpio_chip(d) (d->domain->host_data)

static int gb_gpio_line_count_operation(struct gb_gpio_controller *ggc)
{
	struct gb_gpio_line_count_response response;
	int ret;

	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_LINE_COUNT,
				NULL, 0, &response, sizeof(response));
	if (!ret)
		ggc->line_max = response.count;
	return ret;
}

static int gb_gpio_activate_operation(struct gb_gpio_controller *ggc, u8 which)
{
	struct gb_gpio_activate_request request;
	struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		return ret;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_ACTIVATE,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		gbphy_runtime_put_autosuspend(gbphy_dev);
		return ret;
	}

	ggc->lines[which].active = true;

	return 0;
}

static void gb_gpio_deactivate_operation(struct gb_gpio_controller *ggc,
					 u8 which)
{
	struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
	struct device *dev = &gbphy_dev->dev;
	struct gb_gpio_deactivate_request request;
	int ret;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DEACTIVATE,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(dev, "failed to deactivate gpio %u\n", which);
		goto out_pm_put;
	}

	ggc->lines[which].active = false;

out_pm_put:
	gbphy_runtime_put_autosuspend(gbphy_dev);
}

static int gb_gpio_get_direction_operation(struct gb_gpio_controller *ggc,
					   u8 which)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_get_direction_request request;
	struct gb_gpio_get_direction_response response;
	int ret;
	u8 direction;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_DIRECTION,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret)
		return ret;

	direction = response.direction;
	if (direction && direction != 1) {
		dev_warn(dev, "gpio %u direction was %u (should be 0 or 1)\n",
			 which, direction);
	}
	ggc->lines[which].direction = direction ? 1 : 0;
	return 0;
}

static int gb_gpio_direction_in_operation(struct gb_gpio_controller *ggc,
					  u8 which)
{
	struct gb_gpio_direction_in_request request;
	int ret;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_IN,
				&request, sizeof(request), NULL, 0);
	if (!ret)
		ggc->lines[which].direction = 1;
	return ret;
}

static int gb_gpio_direction_out_operation(struct gb_gpio_controller *ggc,
					   u8 which, bool value_high)
{
	struct gb_gpio_direction_out_request request;
	int ret;

	request.which = which;
	request.value = value_high ? 1 : 0;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_OUT,
				&request, sizeof(request), NULL, 0);
	if (!ret)
		ggc->lines[which].direction = 0;
	return ret;
}

static int gb_gpio_get_value_operation(struct gb_gpio_controller *ggc,
				       u8 which)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_get_value_request request;
	struct gb_gpio_get_value_response response;
	int ret;
	u8 value;

	request.which = which;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_VALUE,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(dev, "failed to get value of gpio %u\n", which);
		return ret;
	}

	value = response.value;
	if (value && value != 1) {
		dev_warn(dev, "gpio %u value was %u (should be 0 or 1)\n",
			 which, value);
	}
	ggc->lines[which].value = value ? 1 : 0;
	return 0;
}

static void gb_gpio_set_value_operation(struct gb_gpio_controller *ggc,
					u8 which, bool value_high)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_set_value_request request;
	int ret;

	if (ggc->lines[which].direction == 1) {
		dev_warn(dev, "refusing to set value of input gpio %u\n",
			 which);
		return;
	}

	request.which = which;
	request.value = value_high ? 1 : 0;
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_VALUE,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(dev, "failed to set value of gpio %u\n", which);
		return;
	}

	ggc->lines[which].value = request.value;
}

static int gb_gpio_set_debounce_operation(struct gb_gpio_controller *ggc,
					  u8 which, u16 debounce_usec)
{
	struct gb_gpio_set_debounce_request request;
	int ret;

	request.which = which;
	request.usec = cpu_to_le16(debounce_usec);
	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_DEBOUNCE,
				&request, sizeof(request), NULL, 0);
	if (!ret)
		ggc->lines[which].debounce_usec = debounce_usec;
	return ret;
}

static void _gb_gpio_irq_mask(struct gb_gpio_controller *ggc, u8 hwirq)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_irq_mask_request request;
	int ret;

	request.which = hwirq;
	ret = gb_operation_sync(ggc->connection,
				GB_GPIO_TYPE_IRQ_MASK,
				&request, sizeof(request), NULL, 0);
	if (ret)
		dev_err(dev, "failed to mask irq: %d\n", ret);
}

static void _gb_gpio_irq_unmask(struct gb_gpio_controller *ggc, u8 hwirq)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_irq_unmask_request request;
	int ret;

	request.which = hwirq;
	ret = gb_operation_sync(ggc->connection,
				GB_GPIO_TYPE_IRQ_UNMASK,
				&request, sizeof(request), NULL, 0);
	if (ret)
		dev_err(dev, "failed to unmask irq: %d\n", ret);
}

static void _gb_gpio_irq_set_type(struct gb_gpio_controller *ggc,
				  u8 hwirq, u8 type)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_irq_type_request request;
	int ret;

	request.which = hwirq;
	request.type = type;

	ret = gb_operation_sync(ggc->connection,
				GB_GPIO_TYPE_IRQ_TYPE,
				&request, sizeof(request), NULL, 0);
	if (ret)
		dev_err(dev, "failed to set irq type: %d\n", ret);
}

/*
 * The irq_chip callbacks below run in atomic context, so they only record
 * the requested state; the actual (sleeping) Greybus operations are issued
 * later from gb_gpio_irq_bus_sync_unlock().
 */
static void gb_gpio_irq_mask(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];

	line->masked = true;
	line->masked_pending = true;
}

static void gb_gpio_irq_unmask(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];

	line->masked = false;
	line->masked_pending = true;
}

static int gb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];
	struct device *dev = &ggc->gbphy_dev->dev;
	u8 irq_type;

	switch (type) {
	case IRQ_TYPE_NONE:
		irq_type = GB_GPIO_IRQ_TYPE_NONE;
		break;
	case IRQ_TYPE_EDGE_RISING:
		irq_type = GB_GPIO_IRQ_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		irq_type = GB_GPIO_IRQ_TYPE_EDGE_FALLING;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		irq_type = GB_GPIO_IRQ_TYPE_EDGE_BOTH;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		irq_type = GB_GPIO_IRQ_TYPE_LEVEL_LOW;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		irq_type = GB_GPIO_IRQ_TYPE_LEVEL_HIGH;
		break;
	default:
		dev_err(dev, "unsupported irq type: %u\n", type);
		return -EINVAL;
	}

	line->irq_type = irq_type;
	line->irq_type_pending = true;

	return 0;
}

static void gb_gpio_irq_bus_lock(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	mutex_lock(&ggc->irq_lock);
}

static void gb_gpio_irq_bus_sync_unlock(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];

	if (line->irq_type_pending) {
		_gb_gpio_irq_set_type(ggc, d->hwirq, line->irq_type);
		line->irq_type_pending = false;
	}

	if (line->masked_pending) {
		if (line->masked)
			_gb_gpio_irq_mask(ggc, d->hwirq);
		else
			_gb_gpio_irq_unmask(ggc, d->hwirq);
		line->masked_pending = false;
	}

	mutex_unlock(&ggc->irq_lock);
}

/*
 * Handle unsolicited IRQ-event requests from the module by injecting them
 * into the corresponding mapped Linux interrupt.
 */
static int gb_gpio_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_gpio_controller *ggc = gb_connection_get_data(connection);
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_message *request;
	struct gb_gpio_irq_event_request *event;
	u8 type = op->type;
	int irq;
	struct irq_desc *desc;

	if (type != GB_GPIO_TYPE_IRQ_EVENT) {
		dev_err(dev, "unsupported unsolicited request: %u\n", type);
		return -EINVAL;
	}

	request = op->request;

	if (request->payload_size < sizeof(*event)) {
		dev_err(dev, "short event received (%zu < %zu)\n",
			request->payload_size, sizeof(*event));
		return -EINVAL;
	}

	event = request->payload;
	if (event->which > ggc->line_max) {
		dev_err(dev, "invalid hw irq: %d\n", event->which);
		return -EINVAL;
	}

	irq = irq_find_mapping(ggc->irqdomain, event->which);
	if (!irq) {
		dev_err(dev, "failed to find IRQ\n");
		return -EINVAL;
	}
	desc = irq_to_desc(irq);
	if (!desc) {
		dev_err(dev, "failed to look up irq\n");
		return -EINVAL;
	}

	local_irq_disable();
	generic_handle_irq_desc(desc);
	local_irq_enable();

	return 0;
}

static int gb_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	return gb_gpio_activate_operation(ggc, (u8)offset);
}

static void gb_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	gb_gpio_deactivate_operation(ggc, (u8)offset);
}

static int gb_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	u8 which;
	int ret;

	which = (u8)offset;
	ret = gb_gpio_get_direction_operation(ggc, which);
	if (ret)
		return ret;

	return ggc->lines[which].direction ? 1 : 0;
}

static int gb_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	return gb_gpio_direction_in_operation(ggc, (u8)offset);
}

static int gb_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
				    int value)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	return gb_gpio_direction_out_operation(ggc, (u8)offset, !!value);
}

static int gb_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	u8 which;
	int ret;

	which = (u8)offset;
	ret = gb_gpio_get_value_operation(ggc, which);
	if (ret)
		return ret;

	return ggc->lines[which].value;
}

static void gb_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	gb_gpio_set_value_operation(ggc, (u8)offset, !!value);
}

static int gb_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
			      unsigned long config)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	u32 debounce;

	if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
		return -ENOTSUPP;

	debounce = pinconf_to_config_argument(config);
	if (debounce > U16_MAX)
		return -EINVAL;

	return gb_gpio_set_debounce_operation(ggc, (u8)offset, (u16)debounce);
}

static int gb_gpio_controller_setup(struct gb_gpio_controller *ggc)
{
	int ret;

	/* First find out how many lines this controller has */
	ret = gb_gpio_line_count_operation(ggc);
	if (ret)
		return ret;

	ggc->lines = kcalloc(ggc->line_max + 1, sizeof(*ggc->lines),
			     GFP_KERNEL);
	if (!ggc->lines)
		return -ENOMEM;

	return ret;
}

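/*
 * gb_gpio_irq_map() - map a GPIO line offset to a Linux IRQ
 * @domain: the irq domain owned by this gb_gpio_controller
 * @irq: the Linux irq number being set up
 * @hwirq: the GPIO line offset on this controller
 *
 * Associates the controller's irq_chip and flow handler with the newly
 * created mapping and applies the default trigger type, if any.
 */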
static int gb_gpio_irq_map(struct irq_domain *domain, unsigned int irq,
			   irq_hw_number_t hwirq)
{
	struct gpio_chip *chip = domain->host_data;
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	irq_set_chip_data(irq, ggc);
	irq_set_chip_and_handler(irq, ggc->irqchip, ggc->irq_handler);
	irq_set_noprobe(irq);
	/*
	 * No set-up of the hardware will happen if IRQ_TYPE_NONE
	 * is passed as default type.
	 */
	if (ggc->irq_default_type != IRQ_TYPE_NONE)
		irq_set_irq_type(irq, ggc->irq_default_type);

	return 0;
}

static void gb_gpio_irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}

static const struct irq_domain_ops gb_gpio_domain_ops = {
	.map = gb_gpio_irq_map,
	.unmap = gb_gpio_irq_unmap,
};

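/*
 * gb_gpio_irqchip_remove() - remove the irqchip from a gb_gpio_controller
 * @ggc: the gb_gpio_controller to remove the irqchip from
 *
 * Disposes all IRQ mappings, removes the irq domain and drops the irq_chip
 * reference. Called from gb_gpio_remove() and from the probe error path.
 */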
static void gb_gpio_irqchip_remove(struct gb_gpio_controller *ggc)
{
	unsigned int offset;

	/* Remove all IRQ mappings and delete the domain */
	if (ggc->irqdomain) {
		for (offset = 0; offset < (ggc->line_max + 1); offset++)
			irq_dispose_mapping(irq_find_mapping(ggc->irqdomain,
							     offset));
		irq_domain_remove(ggc->irqdomain);
	}

	if (ggc->irqchip)
		ggc->irqchip = NULL;
}

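/*
 * gb_gpio_irqchip_add() - associate an irqchip with the gpio chip
 * @chip: the gpio chip to add the irqchip to
 * @irqchip: the irqchip to add to the gpio chip
 * @first_irq: if not dynamically assigned, the base (first) IRQ to
 *	allocate gpio irqs from
 * @handler: the irq flow handler to use (e.g. handle_level_irq)
 * @type: the default trigger type for IRQs on this irqchip; pass
 *	IRQ_TYPE_NONE to avoid setting up any default type
 *
 * Creates a simple irq domain covering all lines of the controller so
 * that gpio_chip.to_irq() can translate line offsets into Linux IRQs.
 */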
static int gb_gpio_irqchip_add(struct gpio_chip *chip,
			       struct irq_chip *irqchip,
			       unsigned int first_irq,
			       irq_flow_handler_t handler,
			       unsigned int type)
{
	struct gb_gpio_controller *ggc;
	unsigned int offset;
	unsigned int irq_base;

	if (!chip || !irqchip)
		return -EINVAL;

	ggc = gpio_chip_to_gb_gpio_controller(chip);

	ggc->irqchip = irqchip;
	ggc->irq_handler = handler;
	ggc->irq_default_type = type;
	ggc->irqdomain = irq_domain_add_simple(NULL,
					       ggc->line_max + 1, first_irq,
					       &gb_gpio_domain_ops, chip);
	if (!ggc->irqdomain) {
		ggc->irqchip = NULL;
		return -EINVAL;
	}

	/*
	 * Prepare the mappings up front so the irqchip is usable
	 * independently of any gpio calls. If first_irq was zero, this
	 * is necessary to allocate descriptors for all IRQs.
	 */
	for (offset = 0; offset < (ggc->line_max + 1); offset++) {
		irq_base = irq_create_mapping(ggc->irqdomain, offset);
		if (offset == 0)
			ggc->irq_base = irq_base;
	}

	return 0;
}

static int gb_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	return irq_find_mapping(ggc->irqdomain, offset);
}

static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
			 const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	struct gb_gpio_controller *ggc;
	struct gpio_chip *gpio;
	struct irq_chip *irqc;
	int ret;

	ggc = kzalloc(sizeof(*ggc), GFP_KERNEL);
	if (!ggc)
		return -ENOMEM;

	connection = gb_connection_create(gbphy_dev->bundle,
					  le16_to_cpu(gbphy_dev->cport_desc->id),
					  gb_gpio_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto exit_ggc_free;
	}

	ggc->connection = connection;
	gb_connection_set_data(connection, ggc);
	ggc->gbphy_dev = gbphy_dev;
	gb_gbphy_set_data(gbphy_dev, ggc);

	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto exit_connection_destroy;

	ret = gb_gpio_controller_setup(ggc);
	if (ret)
		goto exit_connection_disable;

	irqc = &ggc->irqc;
	irqc->irq_mask = gb_gpio_irq_mask;
	irqc->irq_unmask = gb_gpio_irq_unmask;
	irqc->irq_set_type = gb_gpio_irq_set_type;
	irqc->irq_bus_lock = gb_gpio_irq_bus_lock;
	irqc->irq_bus_sync_unlock = gb_gpio_irq_bus_sync_unlock;
	irqc->name = "greybus_gpio";

	mutex_init(&ggc->irq_lock);

	gpio = &ggc->chip;

	gpio->label = "greybus_gpio";
	gpio->parent = &gbphy_dev->dev;
	gpio->owner = THIS_MODULE;

	gpio->request = gb_gpio_request;
	gpio->free = gb_gpio_free;
	gpio->get_direction = gb_gpio_get_direction;
	gpio->direction_input = gb_gpio_direction_input;
	gpio->direction_output = gb_gpio_direction_output;
	gpio->get = gb_gpio_get;
	gpio->set = gb_gpio_set;
	gpio->set_config = gb_gpio_set_config;
	gpio->to_irq = gb_gpio_to_irq;
	gpio->base = -1;	/* Allocate base dynamically */
	gpio->ngpio = ggc->line_max + 1;
	gpio->can_sleep = true;

	ret = gb_connection_enable(connection);
	if (ret)
		goto exit_line_free;

	ret = gb_gpio_irqchip_add(gpio, irqc, 0,
				  handle_level_irq, IRQ_TYPE_NONE);
	if (ret) {
		dev_err(&gbphy_dev->dev, "failed to add irq chip: %d\n", ret);
		goto exit_line_free;
	}

	ret = gpiochip_add(gpio);
	if (ret) {
		dev_err(&gbphy_dev->dev, "failed to add gpio chip: %d\n", ret);
		goto exit_gpio_irqchip_remove;
	}

	gbphy_runtime_put_autosuspend(gbphy_dev);
	return 0;

exit_gpio_irqchip_remove:
	gb_gpio_irqchip_remove(ggc);
exit_line_free:
	kfree(ggc->lines);
exit_connection_disable:
	gb_connection_disable(connection);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_ggc_free:
	kfree(ggc);
	return ret;
}

static void gb_gpio_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_gpio_controller *ggc = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = ggc->connection;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		gbphy_runtime_get_noresume(gbphy_dev);

	gb_connection_disable_rx(connection);
	gpiochip_remove(&ggc->chip);
	gb_gpio_irqchip_remove(ggc);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
	kfree(ggc->lines);
	kfree(ggc);
}

static const struct gbphy_device_id gb_gpio_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_GPIO) },
	{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_gpio_id_table);

static struct gbphy_driver gpio_driver = {
	.name = "gpio",
	.probe = gb_gpio_probe,
	.remove = gb_gpio_remove,
	.id_table = gb_gpio_id_table,
};

module_gbphy_driver(gpio_driver);
MODULE_LICENSE("GPL v2");