#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <linux/slab.h>
#include <linux/inet.h>
#include <linux/string.h>
#include <linux/mlx4/driver.h>

#include "mlx4_ib.h"

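/*
 * Fill in the address vector for a native InfiniBand destination:
 * PD/port, source path bits, SL, DLID, optional GRH fields and a
 * static rate translated to the device encoding.
 */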
static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
				  struct mlx4_ib_ah *ah)
{
	struct mlx4_dev *dev = to_mdev(pd->device)->dev;

	ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
	ah->av.ib.g_slid = ah_attr->src_path_bits;
	ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
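	/* Bit 7 of g_slid flags the presence of a GRH. */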
	if (ah_attr->ah_flags & IB_AH_GRH) {
		ah->av.ib.g_slid |= 0x80;
		ah->av.ib.gid_index = ah_attr->grh.sgid_index;
		ah->av.ib.hop_limit = ah_attr->grh.hop_limit;
		ah->av.ib.sl_tclass_flowlabel |=
			cpu_to_be32((ah_attr->grh.traffic_class << 20) |
				    ah_attr->grh.flow_label);
		memcpy(ah->av.ib.dgid, ah_attr->grh.dgid.raw, 16);
	}

	ah->av.ib.dlid = cpu_to_be16(ah_attr->dlid);
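	/*
	 * Convert the IB static rate to the mlx4 encoding, then step it
	 * down until a rate supported by the device is found.
	 */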
	if (ah_attr->static_rate) {
		ah->av.ib.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
		while (ah->av.ib.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
			--ah->av.ib.stat_rate;
	}

	return &ah->ibah;
}

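/*
 * Fill in the address vector for an Ethernet (RoCE) destination: the
 * destination MAC comes either from the multicast-mapped GID or from
 * ah_attr->dmac, while the source GID's netdev supplies the VLAN ID
 * and the source MAC.
 */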
static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
				    struct mlx4_ib_ah *ah)
{
	struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
	struct mlx4_dev *dev = ibdev->dev;
	int is_mcast = 0;
	struct in6_addr in6;
	u16 vlan_tag = 0xffff;
	union ib_gid sgid;
	struct ib_gid_attr gid_attr;
	int ret;

	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
	if (rdma_is_multicast_addr(&in6)) {
		is_mcast = 1;
		rdma_get_mcast_mac(&in6, ah->av.eth.mac);
	} else {
		memcpy(ah->av.eth.mac, ah_attr->dmac, ETH_ALEN);
	}
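	/*
	 * Look up the source GID so its associated netdev can provide
	 * the VLAN ID and the source MAC for this AH.
	 */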
	ret = ib_get_cached_gid(pd->device, ah_attr->port_num,
				ah_attr->grh.sgid_index, &sgid, &gid_attr);
	if (ret)
		return ERR_PTR(ret);
	eth_zero_addr(ah->av.eth.s_mac);
	if (gid_attr.ndev) {
		if (is_vlan_dev(gid_attr.ndev))
			vlan_tag = vlan_dev_vlan_id(gid_attr.ndev);
		memcpy(ah->av.eth.s_mac, gid_attr.ndev->dev_addr, ETH_ALEN);
		dev_put(gid_attr.ndev);
	}
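	/* If a VLAN is in use, fold the SL into the 802.1p priority bits. */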
	if (vlan_tag < 0x1000)
		vlan_tag |= (ah_attr->sl & 7) << 13;
	ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
	ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
	ah->av.eth.vlan = cpu_to_be16(vlan_tag);
	ah->av.eth.hop_limit = ah_attr->grh.hop_limit;
	if (ah_attr->static_rate) {
		ah->av.eth.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
		while (ah->av.eth.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
			--ah->av.eth.stat_rate;
	}

	/*
	 * Multicast destinations need a multicast LID; use the base of
	 * the multicast LID range (0xc000).
	 */
	if (is_mcast)
		ah->av.ib.dlid = cpu_to_be16(0xc000);

	memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
	ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29);

	return &ah->ibah;
}

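/*
 * Allocate an address handle and dispatch on the port's link layer:
 * Ethernet ports require a GRH and go through create_iboe_ah(),
 * InfiniBand ports go through create_ib_ah().
 */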
struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct mlx4_ib_ah *ah;
	struct ib_ah *ret;

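	/* GFP_ATOMIC: AH creation can be called from contexts that must not sleep. */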
	ah = kzalloc(sizeof *ah, GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	if (rdma_port_get_link_layer(pd->device, ah_attr->port_num) == IB_LINK_LAYER_ETHERNET) {
		if (!(ah_attr->ah_flags & IB_AH_GRH)) {
			ret = ERR_PTR(-EINVAL);
		} else {
			/*
			 * TBD: need to handle the case when we get
			 * called in an atomic context and there we
			 * might sleep.  We don't expect this
			 * currently since we're working with link
			 * local addresses which we can translate
			 * without going to sleep.
			 */
			ret = create_iboe_ah(pd, ah_attr, ah);
		}

		if (IS_ERR(ret))
			kfree(ah);

		return ret;
	} else
		return create_ib_ah(pd, ah_attr, ah);
}

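/*
 * Reconstruct an ib_ah_attr from the stored address vector; fields
 * that only apply to InfiniBand (e.g. the DLID) are reported as zero
 * on Ethernet ports.
 */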
int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct mlx4_ib_ah *ah = to_mah(ibah);
	enum rdma_link_layer ll;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;
	ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num);
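	/* The SL sits in the top 3 bits on Ethernet, the top 4 bits on IB. */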
	if (ll == IB_LINK_LAYER_ETHERNET)
		ah_attr->sl = be32_to_cpu(ah->av.eth.sl_tclass_flowlabel) >> 29;
	else
		ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;

	ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? be16_to_cpu(ah->av.ib.dlid) : 0;
	if (ah->av.ib.stat_rate)
		ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET;
	ah_attr->src_path_bits = ah->av.ib.g_slid & 0x7F;

	if (mlx4_ib_ah_grh_present(ah)) {
		ah_attr->ah_flags = IB_AH_GRH;

		ah_attr->grh.traffic_class =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20;
		ah_attr->grh.flow_label =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) & 0xfffff;
		ah_attr->grh.hop_limit = ah->av.ib.hop_limit;
		ah_attr->grh.sgid_index = ah->av.ib.gid_index;
		memcpy(ah_attr->grh.dgid.raw, ah->av.ib.dgid, 16);
	}

	return 0;
}

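/* No hardware resources are held for an AH, so destroying it is just a kfree(). */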
int mlx4_ib_destroy_ah(struct ib_ah *ah)
{
	kfree(to_mah(ah));
	return 0;
}