#include <net/neighbour.h>
#include <net/netevent.h>

#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>

#include "ocrdma.h"
#include "ocrdma_verbs.h"
#include "ocrdma_ah.h"
#include "ocrdma_hw.h"
#include "ocrdma_stats.h"

#define OCRDMA_VID_PCP_SHIFT	0xD

static u16 ocrdma_hdr_type_to_proto_num(int devid, u8 hdr_type)
{
	switch (hdr_type) {
	case OCRDMA_L3_TYPE_IB_GRH:
		return (u16)ETH_P_IBOE;
	case OCRDMA_L3_TYPE_IPV4:
		return (u16)ETH_P_IP;
	case OCRDMA_L3_TYPE_IPV6:
		return (u16)ETH_P_IPV6;
	default:
		pr_err("ocrdma%d: Invalid network header\n", devid);
		return 0;
	}
}

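/*
 * set_av_attr() fills the address vector allocated by ocrdma_alloc_av():
 * an Ethernet header (802.1Q tagged when a VLAN or PFC is in use),
 * followed by either an IPv4 header (for RDMA_NETWORK_IPV4 GIDs) or a
 * GRH for RoCEv1/IPv6 destinations.
 */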
static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
			      struct ib_ah_attr *attr, union ib_gid *sgid,
			      int pdid, bool *isvlan, u16 vlan_tag)
{
	int status;
	struct ocrdma_eth_vlan eth;
	struct ocrdma_grh grh;
	int eth_sz;
	u16 proto_num = 0;
	u8 nxthdr = 0x11;
	struct iphdr ipv4;
	union {
		struct sockaddr _sockaddr;
		struct sockaddr_in _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} sgid_addr, dgid_addr;

	memset(&eth, 0, sizeof(eth));
	memset(&grh, 0, sizeof(grh));

	/* Pick the RoCE ethertype and next-header value from the GID type. */
	proto_num = ocrdma_hdr_type_to_proto_num(dev->id, ah->hdr_type);
	if (!proto_num)
		return -EINVAL;
	nxthdr = (proto_num == ETH_P_IBOE) ? 0x1b : 0x11;

	/* Qualify the VLAN: fall back to the port default VLAN (pvid). */
	if (!vlan_tag || (vlan_tag > 0xFFF))
		vlan_tag = dev->pvid;
	if (vlan_tag || dev->pfc_state) {
		if (!vlan_tag) {
			pr_err("ocrdma%d: Using VLAN with PFC is recommended\n",
			       dev->id);
			pr_err("ocrdma%d: Using VLAN 0 for this connection\n",
			       dev->id);
		}
		eth.eth_type = cpu_to_be16(0x8100);
		eth.roce_eth_type = cpu_to_be16(proto_num);
		/* The service level is carried in the VLAN PCP bits. */
		vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
		eth.vlan_tag = cpu_to_be16(vlan_tag);
		eth_sz = sizeof(struct ocrdma_eth_vlan);
		*isvlan = true;
	} else {
		eth.eth_type = cpu_to_be16(proto_num);
		eth_sz = sizeof(struct ocrdma_eth_basic);
	}

	memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
	status = ocrdma_resolve_dmac(dev, attr, &eth.dmac[0]);
	if (status)
		return status;
	ah->sgid_index = attr->grh.sgid_index;

	memcpy(&ah->av->eth_hdr, &eth, eth_sz);
	if (ah->hdr_type == RDMA_NETWORK_IPV4) {
		*((__be16 *)&ipv4) = htons((4 << 12) | (5 << 8) |
					   attr->grh.traffic_class);
		ipv4.id = cpu_to_be16(pdid);
		ipv4.frag_off = htons(IP_DF);
		ipv4.tot_len = htons(0);
		ipv4.ttl = attr->grh.hop_limit;
		ipv4.protocol = nxthdr;
		rdma_gid2ip(&sgid_addr._sockaddr, sgid);
		ipv4.saddr = sgid_addr._sockaddr_in.sin_addr.s_addr;
		rdma_gid2ip(&dgid_addr._sockaddr, &attr->grh.dgid);
		ipv4.daddr = dgid_addr._sockaddr_in.sin_addr.s_addr;
		memcpy((u8 *)ah->av + eth_sz, &ipv4, sizeof(struct iphdr));
	} else {
		memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
		grh.tclass_flow = cpu_to_be32((6 << 28) |
					      (attr->grh.traffic_class << 24) |
					      attr->grh.flow_label);
		memcpy(&grh.dgid[0], attr->grh.dgid.raw,
		       sizeof(attr->grh.dgid.raw));
		grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
						(nxthdr << 8) |
						attr->grh.hop_limit);
		memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
	}
	if (*isvlan)
		ah->av->valid |= OCRDMA_AV_VLAN_VALID;
	ah->av->valid = cpu_to_le32(ah->av->valid);
	return status;
}

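/*
 * ocrdma_create_ah() resolves the source GID, VLAN and destination MAC for
 * the requested sgid_index, builds the address vector via set_av_attr(),
 * and, for user-space PDs, publishes the resulting AH id (plus the L3 type
 * and VLAN-valid flags) at the attr->dlid slot of the per-context ah_tbl so
 * that user space can retrieve it.
 */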
struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
			       struct ib_udata *udata)
{
	u32 *ahid_addr;
	int status;
	struct ocrdma_ah *ah;
	bool isvlan = false;
	u16 vlan_tag = 0xffff;
	struct ib_gid_attr sgid_attr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	union ib_gid sgid;

	if (!(attr->ah_flags & IB_AH_GRH))
		return ERR_PTR(-EINVAL);

	if (atomic_cmpxchg(&dev->update_sl, 1, 0))
		ocrdma_init_service_level(dev);

	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	status = ocrdma_alloc_av(dev, ah);
	if (status)
		goto av_err;

	status = ib_get_cached_gid(&dev->ibdev, 1, attr->grh.sgid_index, &sgid,
				   &sgid_attr);
	if (status) {
		pr_err("%s(): Failed to query sgid, status = %d\n",
		       __func__, status);
		goto av_conf_err;
	}
	if (sgid_attr.ndev) {
		if (is_vlan_dev(sgid_attr.ndev))
			vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
		dev_put(sgid_attr.ndev);
	}

	/* Get the network header type for this GID */
	ah->hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);

	if ((pd->uctx) &&
	    (!rdma_is_multicast_addr((struct in6_addr *)attr->grh.dgid.raw)) &&
	    (!rdma_link_local_addr((struct in6_addr *)attr->grh.dgid.raw))) {
		status = rdma_addr_find_l2_eth_by_grh(&sgid, &attr->grh.dgid,
						      attr->dmac, &vlan_tag,
						      &sgid_attr.ndev->ifindex,
						      NULL);
		if (status) {
			pr_err("%s(): Failed to resolve dmac from gid, status = %d\n",
			       __func__, status);
			goto av_conf_err;
		}
	}

	status = set_av_attr(dev, ah, attr, &sgid, pd->id, &isvlan, vlan_tag);
	if (status)
		goto av_conf_err;

	/* if pd is for the user process, pass the ah_id to user space */
	if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
		ahid_addr = pd->uctx->ah_tbl.va + attr->dlid;
		*ahid_addr = 0;
		*ahid_addr |= ah->id & OCRDMA_AH_ID_MASK;
		if (ocrdma_is_udp_encap_supported(dev)) {
			*ahid_addr |= ((u32)ah->hdr_type &
				       OCRDMA_AH_L3_TYPE_MASK) <<
				       OCRDMA_AH_L3_TYPE_SHIFT;
		}
		if (isvlan)
			*ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK <<
				       OCRDMA_AH_VLAN_VALID_SHIFT);
	}

	return &ah->ibah;

av_conf_err:
	ocrdma_free_av(dev, ah);
av_err:
	kfree(ah);
	return ERR_PTR(status);
}

int ocrdma_destroy_ah(struct ib_ah *ibah)
{
	struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);

	ocrdma_free_av(dev, ah);
	kfree(ah);
	return 0;
}

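/*
 * ocrdma_query_ah() reconstructs the ib_ah_attr from the address vector
 * that set_av_attr() programmed: the GRH fields come from the stored GRH,
 * and the SL is recovered from the PCP bits of the cached VLAN tag when
 * the AV carries a VLAN header.
 */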
int ocrdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
	struct ocrdma_av *av = ah->av;
	struct ocrdma_grh *grh;

	attr->ah_flags |= IB_AH_GRH;
	if (ah->av->valid & OCRDMA_AV_VALID) {
		grh = (struct ocrdma_grh *)((u8 *)ah->av +
					    sizeof(struct ocrdma_eth_vlan));
		attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >>
			   OCRDMA_VID_PCP_SHIFT;
	} else {
		grh = (struct ocrdma_grh *)((u8 *)ah->av +
					    sizeof(struct ocrdma_eth_basic));
		attr->sl = 0;
	}
	memcpy(&attr->grh.dgid.raw[0], &grh->dgid[0], sizeof(grh->dgid));
	attr->grh.sgid_index = ah->sgid_index;
	attr->grh.hop_limit = be32_to_cpu(grh->pdid_hoplimit) & 0xff;
	attr->grh.traffic_class = be32_to_cpu(grh->tclass_flow) >> 24;
	attr->grh.flow_label = be32_to_cpu(grh->tclass_flow) & 0x00ffffff;
	return 0;
}

int ocrdma_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	/* modifying an existing AH is not supported */
	return -ENOSYS;
}

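/*
 * ocrdma_process_mad() only acts on performance-management (PMA) MADs,
 * for which it builds the response from the driver's hardware counters;
 * every other management class is consumed without generating a reply.
 */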
int ocrdma_process_mad(struct ib_device *ibdev,
		       int process_mad_flags,
		       u8 port_num,
		       const struct ib_wc *in_wc,
		       const struct ib_grh *in_grh,
		       const struct ib_mad_hdr *in, size_t in_mad_size,
		       struct ib_mad_hdr *out, size_t *out_mad_size,
		       u16 *out_mad_pkey_index)
{
	int status;
	struct ocrdma_dev *dev;
	const struct ib_mad *in_mad = (const struct ib_mad *)in;
	struct ib_mad *out_mad = (struct ib_mad *)out;

	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
			 *out_mad_size != sizeof(*out_mad)))
		return IB_MAD_RESULT_FAILURE;

	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_PERF_MGMT:
		dev = get_ocrdma_dev(ibdev);
		if (!ocrdma_pma_counters(dev, out_mad))
			status = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
		else
			status = IB_MAD_RESULT_SUCCESS;
		break;
	default:
		status = IB_MAD_RESULT_SUCCESS;
		break;
	}
	return status;
}