#include "core_priv.h"

#include <linux/slab.h>
#include <linux/string.h>

#include <rdma/ib_mad.h>

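/*
 * One struct ib_port is allocated for each port of a registered
 * ib_device and embeds the kobject backing the per-port sysfs
 * directory.  The gid_group and pkey_group members point at attribute
 * arrays allocated in add_port() and freed in ib_port_release().
 */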
struct ib_port {
	struct kobject         kobj;
	struct ib_device      *ibdev;
	struct attribute_group gid_group;
	struct attribute_group pkey_group;
	u8                     port_num;
};

struct port_attribute {
	struct attribute attr;
	ssize_t (*show)(struct ib_port *, struct port_attribute *, char *buf);
	ssize_t (*store)(struct ib_port *, struct port_attribute *,
			 const char *buf, size_t count);
};

#define PORT_ATTR(_name, _mode, _show, _store) \
struct port_attribute port_attr_##_name = __ATTR(_name, _mode, _show, _store)

#define PORT_ATTR_RO(_name) \
struct port_attribute port_attr_##_name = __ATTR_RO(_name)

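/*
 * A port_table_attribute describes one entry of a per-port table: the
 * "name" buffer holds the decimal index used as the sysfs file name
 * for GID and P_Key entries, while "index" carries either that table
 * index or, for PMA counter attributes, the packed descriptor built by
 * PORT_PMA_ATTR() below.
 */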
struct port_table_attribute {
	struct port_attribute	attr;
	char			name[8];
	int			index;
};

static inline int ibdev_is_alive(const struct ib_device *dev)
{
	return dev->reg_state == IB_DEV_REGISTERED;
}

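/*
 * All per-port show methods funnel through port_attr_show(), which
 * refuses access with -ENODEV once the underlying ib_device is no
 * longer registered.
 */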
static ssize_t port_attr_show(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct port_attribute *port_attr =
		container_of(attr, struct port_attribute, attr);
	struct ib_port *p = container_of(kobj, struct ib_port, kobj);

	if (!port_attr->show)
		return -EIO;
	if (!ibdev_is_alive(p->ibdev))
		return -ENODEV;

	return port_attr->show(p, port_attr, buf);
}

static struct sysfs_ops port_sysfs_ops = {
	.show = port_attr_show
};

static ssize_t state_show(struct ib_port *p, struct port_attribute *unused,
			  char *buf)
{
	struct ib_port_attr attr;
	ssize_t ret;

	static const char *state_name[] = {
		[IB_PORT_NOP]		= "NOP",
		[IB_PORT_DOWN]		= "DOWN",
		[IB_PORT_INIT]		= "INIT",
		[IB_PORT_ARMED]		= "ARMED",
		[IB_PORT_ACTIVE]	= "ACTIVE",
		[IB_PORT_ACTIVE_DEFER]	= "ACTIVE_DEFER"
	};

	ret = ib_query_port(p->ibdev, p->port_num, &attr);
	if (ret)
		return ret;

	return sprintf(buf, "%d: %s\n", attr.state,
		       attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ?
		       state_name[attr.state] : "UNKNOWN");
}

static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused,
			char *buf)
{
	struct ib_port_attr attr;
	ssize_t ret;

	ret = ib_query_port(p->ibdev, p->port_num, &attr);
	if (ret)
		return ret;

	return sprintf(buf, "0x%x\n", attr.lid);
}

static ssize_t lid_mask_count_show(struct ib_port *p,
				   struct port_attribute *unused,
				   char *buf)
{
	struct ib_port_attr attr;
	ssize_t ret;

	ret = ib_query_port(p->ibdev, p->port_num, &attr);
	if (ret)
		return ret;

	return sprintf(buf, "%d\n", attr.lmc);
}

static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused,
			   char *buf)
{
	struct ib_port_attr attr;
	ssize_t ret;

	ret = ib_query_port(p->ibdev, p->port_num, &attr);
	if (ret)
		return ret;

	return sprintf(buf, "0x%x\n", attr.sm_lid);
}

static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused,
			  char *buf)
{
	struct ib_port_attr attr;
	ssize_t ret;

	ret = ib_query_port(p->ibdev, p->port_num, &attr);
	if (ret)
		return ret;

	return sprintf(buf, "%d\n", attr.sm_sl);
}

static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused,
			     char *buf)
{
	struct ib_port_attr attr;
	ssize_t ret;

	ret = ib_query_port(p->ibdev, p->port_num, &attr);
	if (ret)
		return ret;

	return sprintf(buf, "0x%08x\n", attr.port_cap_flags);
}

static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
			 char *buf)
{
	struct ib_port_attr attr;
	char *speed = "";
	int rate;
	ssize_t ret;

	ret = ib_query_port(p->ibdev, p->port_num, &attr);
	if (ret)
		return ret;

	switch (attr.active_speed) {
	case 2: speed = " DDR"; break;
	case 4: speed = " QDR"; break;
	}

	rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
	if (rate < 0)
		return -EINVAL;

	return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
		       rate / 10, rate % 10 ? ".5" : "",
		       ib_width_enum_to_int(attr.active_width), speed);
}

static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
			       char *buf)
{
	struct ib_port_attr attr;
	ssize_t ret;

	ret = ib_query_port(p->ibdev, p->port_num, &attr);
	if (ret)
		return ret;

	switch (attr.phys_state) {
	case 1:  return sprintf(buf, "1: Sleep\n");
	case 2:  return sprintf(buf, "2: Polling\n");
	case 3:  return sprintf(buf, "3: Disabled\n");
	case 4:  return sprintf(buf, "4: PortConfigurationTraining\n");
	case 5:  return sprintf(buf, "5: LinkUp\n");
	case 6:  return sprintf(buf, "6: LinkErrorRecovery\n");
	case 7:  return sprintf(buf, "7: Phy Test\n");
	default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
	}
}

static PORT_ATTR_RO(state);
static PORT_ATTR_RO(lid);
static PORT_ATTR_RO(lid_mask_count);
static PORT_ATTR_RO(sm_lid);
static PORT_ATTR_RO(sm_sl);
static PORT_ATTR_RO(cap_mask);
static PORT_ATTR_RO(rate);
static PORT_ATTR_RO(phys_state);

static struct attribute *port_default_attrs[] = {
	&port_attr_state.attr,
	&port_attr_lid.attr,
	&port_attr_lid_mask_count.attr,
	&port_attr_sm_lid.attr,
	&port_attr_sm_sl.attr,
	&port_attr_cap_mask.attr,
	&port_attr_rate.attr,
	&port_attr_phys_state.attr,
	NULL
};

static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr,
			     char *buf)
{
	struct port_table_attribute *tab_attr =
		container_of(attr, struct port_table_attribute, attr);
	union ib_gid gid;
	ssize_t ret;

	ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid);
	if (ret)
		return ret;

	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
		       be16_to_cpu(((__be16 *) gid.raw)[0]),
		       be16_to_cpu(((__be16 *) gid.raw)[1]),
		       be16_to_cpu(((__be16 *) gid.raw)[2]),
		       be16_to_cpu(((__be16 *) gid.raw)[3]),
		       be16_to_cpu(((__be16 *) gid.raw)[4]),
		       be16_to_cpu(((__be16 *) gid.raw)[5]),
		       be16_to_cpu(((__be16 *) gid.raw)[6]),
		       be16_to_cpu(((__be16 *) gid.raw)[7]));
}

static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr,
			      char *buf)
{
	struct port_table_attribute *tab_attr =
		container_of(attr, struct port_table_attribute, attr);
	u16 pkey;
	ssize_t ret;

	ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey);
	if (ret)
		return ret;

	return sprintf(buf, "0x%04x\n", pkey);
}

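/*
 * PMA counter attributes pack everything show_pma_counter() needs into
 * the single "index" field: bits 0-15 hold the counter's bit offset
 * within the PortCounters payload (which the show routine reads
 * starting at out_mad->data[40]), bits 16-23 hold the counter width in
 * bits, and bits 24 and up hold the counter number, which the show
 * routine does not currently use.  For example,
 * PORT_PMA_ATTR(port_xmit_data, 12, 32, 192) describes a 32-bit
 * counter at bit offset 192, read as a big-endian word from
 * out_mad->data[40 + 192 / 8].  The query itself is a PerfMgmt GET of
 * attribute 0x12 (PortCounters), with the port number placed in the
 * PortSelect byte at data[41].
 */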
#define PORT_PMA_ATTR(_name, _counter, _width, _offset)			\
struct port_table_attribute port_pma_attr_##_name = {			\
	.attr  = __ATTR(_name, S_IRUGO, show_pma_counter, NULL),	\
	.index = (_offset) | ((_width) << 16) | ((_counter) << 24)	\
}

static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
				char *buf)
{
	struct port_table_attribute *tab_attr =
		container_of(attr, struct port_table_attribute, attr);
	int offset = tab_attr->index & 0xffff;
	int width  = (tab_attr->index >> 16) & 0xff;
	struct ib_mad *in_mad  = NULL;
	struct ib_mad *out_mad = NULL;
	ssize_t ret;

	if (!p->ibdev->process_mad)
		return sprintf(buf, "N/A (no PMA)\n");

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad) {
		ret = -ENOMEM;
		goto out;
	}

	in_mad->mad_hdr.base_version  = 1;
	in_mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_PERF_MGMT;
	in_mad->mad_hdr.class_version = 1;
	in_mad->mad_hdr.method        = IB_MGMT_METHOD_GET;
	in_mad->mad_hdr.attr_id       = cpu_to_be16(0x12);

	in_mad->data[41] = p->port_num;

	if ((p->ibdev->process_mad(p->ibdev, IB_MAD_IGNORE_MKEY,
		 p->port_num, NULL, NULL, in_mad, out_mad) &
	     (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) !=
	    (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) {
		ret = -EINVAL;
		goto out;
	}

	switch (width) {
	case 4:
		ret = sprintf(buf, "%u\n", (out_mad->data[40 + offset / 8] >>
					    (4 - (offset % 8))) & 0xf);
		break;
	case 8:
		ret = sprintf(buf, "%u\n", out_mad->data[40 + offset / 8]);
		break;
	case 16:
		ret = sprintf(buf, "%u\n",
			      be16_to_cpup((__be16 *)(out_mad->data + 40 + offset / 8)));
		break;
	case 32:
		ret = sprintf(buf, "%u\n",
			      be32_to_cpup((__be32 *)(out_mad->data + 40 + offset / 8)));
		break;
	default:
		ret = 0;
	}

out:
	kfree(in_mad);
	kfree(out_mad);

	return ret;
}

static PORT_PMA_ATTR(symbol_error		     ,  0, 16,  32);
static PORT_PMA_ATTR(link_error_recovery	     ,  1,  8,  48);
static PORT_PMA_ATTR(link_downed		     ,  2,  8,  56);
static PORT_PMA_ATTR(port_rcv_errors		     ,  3, 16,  64);
static PORT_PMA_ATTR(port_rcv_remote_physical_errors ,  4, 16,  80);
static PORT_PMA_ATTR(port_rcv_switch_relay_errors    ,  5, 16,  96);
static PORT_PMA_ATTR(port_xmit_discards		     ,  6, 16, 112);
static PORT_PMA_ATTR(port_xmit_constraint_errors     ,  7,  8, 128);
static PORT_PMA_ATTR(port_rcv_constraint_errors	     ,  8,  8, 136);
static PORT_PMA_ATTR(local_link_integrity_errors     ,  9,  4, 152);
static PORT_PMA_ATTR(excessive_buffer_overrun_errors , 10,  4, 156);
static PORT_PMA_ATTR(VL15_dropped		     , 11, 16, 176);
static PORT_PMA_ATTR(port_xmit_data		     , 12, 32, 192);
static PORT_PMA_ATTR(port_rcv_data		     , 13, 32, 224);
static PORT_PMA_ATTR(port_xmit_packets		     , 14, 32, 256);
static PORT_PMA_ATTR(port_rcv_packets		     , 15, 32, 288);

static struct attribute *pma_attrs[] = {
	&port_pma_attr_symbol_error.attr.attr,
	&port_pma_attr_link_error_recovery.attr.attr,
	&port_pma_attr_link_downed.attr.attr,
	&port_pma_attr_port_rcv_errors.attr.attr,
	&port_pma_attr_port_rcv_remote_physical_errors.attr.attr,
	&port_pma_attr_port_rcv_switch_relay_errors.attr.attr,
	&port_pma_attr_port_xmit_discards.attr.attr,
	&port_pma_attr_port_xmit_constraint_errors.attr.attr,
	&port_pma_attr_port_rcv_constraint_errors.attr.attr,
	&port_pma_attr_local_link_integrity_errors.attr.attr,
	&port_pma_attr_excessive_buffer_overrun_errors.attr.attr,
	&port_pma_attr_VL15_dropped.attr.attr,
	&port_pma_attr_port_xmit_data.attr.attr,
	&port_pma_attr_port_rcv_data.attr.attr,
	&port_pma_attr_port_xmit_packets.attr.attr,
	&port_pma_attr_port_rcv_packets.attr.attr,
	NULL
};

static struct attribute_group pma_group = {
	.name  = "counters",
	.attrs = pma_attrs
};

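/*
 * kobject release for struct ib_port.  The GID and P_Key table
 * attributes (and the NULL-terminated arrays holding them) are
 * allocated per port by alloc_group_attrs(), so they must be freed
 * here rather than relying on any static attribute data.
 */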
static void ib_port_release(struct kobject *kobj)
{
	struct ib_port *p = container_of(kobj, struct ib_port, kobj);
	struct attribute *a;
	int i;

	for (i = 0; (a = p->gid_group.attrs[i]); ++i)
		kfree(a);

	kfree(p->gid_group.attrs);

	for (i = 0; (a = p->pkey_group.attrs[i]); ++i)
		kfree(a);

	kfree(p->pkey_group.attrs);

	kfree(p);
}

static struct kobj_type port_type = {
	.release       = ib_port_release,
	.sysfs_ops     = &port_sysfs_ops,
	.default_attrs = port_default_attrs
};

static void ib_device_release(struct class_device *cdev)
{
	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);

	kfree(dev);
}

static int ib_device_uevent(struct class_device *cdev,
			    struct kobj_uevent_env *env)
{
	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);

	if (add_uevent_var(env, "NAME=%s", dev->name))
		return -ENOMEM;

	return 0;
}

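/*
 * Build a NULL-terminated array of attributes named "0" .. "len - 1",
 * each backed by its own port_table_attribute and dispatching to the
 * given show routine.  Used to populate the per-port "gids" and
 * "pkeys" groups; on failure everything allocated so far is freed and
 * NULL is returned.
 */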
static struct attribute **
alloc_group_attrs(ssize_t (*show)(struct ib_port *,
				  struct port_attribute *, char *buf),
		  int len)
{
	struct attribute **tab_attr;
	struct port_table_attribute *element;
	int i;

	tab_attr = kcalloc(1 + len, sizeof(struct attribute *), GFP_KERNEL);
	if (!tab_attr)
		return NULL;

	for (i = 0; i < len; i++) {
		element = kzalloc(sizeof(struct port_table_attribute),
				  GFP_KERNEL);
		if (!element)
			goto err;

		if (snprintf(element->name, sizeof(element->name),
			     "%d", i) >= sizeof(element->name)) {
			kfree(element);
			goto err;
		}

		element->attr.attr.name = element->name;
		element->attr.attr.mode = S_IRUGO;
		element->attr.show      = show;
		element->index          = i;

		tab_attr[i] = &element->attr.attr;
	}

	return tab_attr;

err:
	while (--i >= 0)
		kfree(tab_attr[i]);
	kfree(tab_attr);
	return NULL;
}

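/*
 * Create the sysfs directory for one port of a device, i.e.
 * /sys/class/infiniband/<device>/ports/<port_num>/, containing the
 * default attributes (state, lid, rate, ...), the "counters" group and
 * the dynamically sized "gids" and "pkeys" groups.  The new kobject is
 * queued on device->port_list so the device-level code can tear it
 * down again.
 */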
static int add_port(struct ib_device *device, int port_num)
{
	struct ib_port *p;
	struct ib_port_attr attr;
	int i;
	int ret;

	ret = ib_query_port(device, port_num, &attr);
	if (ret)
		return ret;

	p = kzalloc(sizeof *p, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->ibdev      = device;
	p->port_num   = port_num;
	p->kobj.ktype = &port_type;

	p->kobj.parent = kobject_get(&device->ports_parent);
	if (!p->kobj.parent) {
		ret = -EBUSY;
		goto err;
	}

	ret = kobject_set_name(&p->kobj, "%d", port_num);
	if (ret)
		goto err_put;

	ret = kobject_register(&p->kobj);
	if (ret)
		goto err_put;

	ret = sysfs_create_group(&p->kobj, &pma_group);
	if (ret)
		goto err_put;

	p->gid_group.name  = "gids";
	p->gid_group.attrs = alloc_group_attrs(show_port_gid, attr.gid_tbl_len);
	if (!p->gid_group.attrs) {
		ret = -ENOMEM;
		goto err_remove_pma;
	}

	ret = sysfs_create_group(&p->kobj, &p->gid_group);
	if (ret)
		goto err_free_gid;

	p->pkey_group.name  = "pkeys";
	p->pkey_group.attrs = alloc_group_attrs(show_port_pkey,
						attr.pkey_tbl_len);
	if (!p->pkey_group.attrs) {
		ret = -ENOMEM;
		goto err_remove_gid;
	}

	ret = sysfs_create_group(&p->kobj, &p->pkey_group);
	if (ret)
		goto err_free_pkey;

	list_add_tail(&p->kobj.entry, &device->port_list);

	return 0;

err_free_pkey:
	for (i = 0; i < attr.pkey_tbl_len; ++i)
		kfree(p->pkey_group.attrs[i]);

	kfree(p->pkey_group.attrs);

err_remove_gid:
	sysfs_remove_group(&p->kobj, &p->gid_group);

err_free_gid:
	for (i = 0; i < attr.gid_tbl_len; ++i)
		kfree(p->gid_group.attrs[i]);

	kfree(p->gid_group.attrs);

err_remove_pma:
	sysfs_remove_group(&p->kobj, &pma_group);

err_put:
	kobject_put(&device->ports_parent);

err:
	kfree(p);
	return ret;
}

static ssize_t show_node_type(struct class_device *cdev, char *buf)
{
	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);

	if (!ibdev_is_alive(dev))
		return -ENODEV;

	switch (dev->node_type) {
	case RDMA_NODE_IB_CA:	  return sprintf(buf, "%d: CA\n", dev->node_type);
	case RDMA_NODE_RNIC:	  return sprintf(buf, "%d: RNIC\n", dev->node_type);
	case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
	case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type);
	default:		  return sprintf(buf, "%d: <unknown>\n", dev->node_type);
	}
}

static ssize_t show_sys_image_guid(struct class_device *cdev, char *buf)
{
	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);
	struct ib_device_attr attr;
	ssize_t ret;

	if (!ibdev_is_alive(dev))
		return -ENODEV;

	ret = ib_query_device(dev, &attr);
	if (ret)
		return ret;

	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[0]),
		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[1]),
		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[2]),
		       be16_to_cpu(((__be16 *) &attr.sys_image_guid)[3]));
}

static ssize_t show_node_guid(struct class_device *cdev, char *buf)
{
	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);

	if (!ibdev_is_alive(dev))
		return -ENODEV;

	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
		       be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
		       be16_to_cpu(((__be16 *) &dev->node_guid)[1]),
		       be16_to_cpu(((__be16 *) &dev->node_guid)[2]),
		       be16_to_cpu(((__be16 *) &dev->node_guid)[3]));
}

static ssize_t show_node_desc(struct class_device *cdev, char *buf)
{
	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);

	return sprintf(buf, "%.64s\n", dev->node_desc);
}

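/*
 * Writing node_desc pushes a new node description to the device via
 * ib_modify_device().  The IBA NodeDescription is at most 64 bytes, so
 * anything longer is silently truncated; devices without a
 * modify_device method reject the write with -EIO.
 */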
static ssize_t set_node_desc(struct class_device *cdev, const char *buf,
			     size_t count)
{
	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);
	struct ib_device_modify desc = {};
	int ret;

	if (!dev->modify_device)
		return -EIO;

	memcpy(desc.node_desc, buf, min_t(int, count, 64));
	ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
	if (ret)
		return ret;

	return count;
}

static CLASS_DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
static CLASS_DEVICE_ATTR(sys_image_guid, S_IRUGO, show_sys_image_guid, NULL);
static CLASS_DEVICE_ATTR(node_guid, S_IRUGO, show_node_guid, NULL);
static CLASS_DEVICE_ATTR(node_desc, S_IRUGO | S_IWUSR, show_node_desc,
			 set_node_desc);

static struct class_device_attribute *ib_class_attributes[] = {
	&class_device_attr_node_type,
	&class_device_attr_sys_image_guid,
	&class_device_attr_node_guid,
	&class_device_attr_node_desc
};

static struct class ib_class = {
	.name    = "infiniband",
	.release = ib_device_release,
	.uevent  = ib_device_uevent,
};

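/*
 * Register the sysfs representation of a device: the class device
 * itself under /sys/class/infiniband/<name>, the device-level
 * attribute files, a "ports" kobject, and one port directory per
 * physical port (switches expose only port 0, the management port).
 * On failure everything created so far is rolled back.
 */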
int ib_device_register_sysfs(struct ib_device *device)
{
	struct class_device *class_dev = &device->class_dev;
	int ret;
	int i;

	class_dev->class      = &ib_class;
	class_dev->class_data = device;
	class_dev->dev	      = device->dma_device;
	strlcpy(class_dev->class_id, device->name, BUS_ID_SIZE);

	INIT_LIST_HEAD(&device->port_list);

	ret = class_device_register(class_dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i) {
		ret = class_device_create_file(class_dev, ib_class_attributes[i]);
		if (ret)
			goto err_unregister;
	}

	device->ports_parent.parent = kobject_get(&class_dev->kobj);
	if (!device->ports_parent.parent) {
		ret = -EBUSY;
		goto err_unregister;
	}
	ret = kobject_set_name(&device->ports_parent, "ports");
	if (ret)
		goto err_put;
	ret = kobject_register(&device->ports_parent);
	if (ret)
		goto err_put;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		ret = add_port(device, 0);
		if (ret)
			goto err_put;
	} else {
		for (i = 1; i <= device->phys_port_cnt; ++i) {
			ret = add_port(device, i);
			if (ret)
				goto err_put;
		}
	}

	return 0;

err_put:
	{
		struct kobject *p, *t;
		struct ib_port *port;

		list_for_each_entry_safe(p, t, &device->port_list, entry) {
			list_del(&p->entry);
			port = container_of(p, struct ib_port, kobj);
			sysfs_remove_group(p, &pma_group);
			sysfs_remove_group(p, &port->pkey_group);
			sysfs_remove_group(p, &port->gid_group);
			kobject_unregister(p);
		}
	}

	kobject_put(&class_dev->kobj);

err_unregister:
	class_device_unregister(class_dev);

err:
	return ret;
}

void ib_device_unregister_sysfs(struct ib_device *device)
{
	struct kobject *p, *t;
	struct ib_port *port;

	list_for_each_entry_safe(p, t, &device->port_list, entry) {
		list_del(&p->entry);
		port = container_of(p, struct ib_port, kobj);
		sysfs_remove_group(p, &pma_group);
		sysfs_remove_group(p, &port->pkey_group);
		sysfs_remove_group(p, &port->gid_group);
		kobject_unregister(p);
	}

	kobject_unregister(&device->ports_parent);
	class_device_unregister(&device->class_dev);
}

int ib_sysfs_setup(void)
{
	return class_register(&ib_class);
}

void ib_sysfs_cleanup(void)
{
	class_unregister(&ib_class);
}