1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42#include <rdma/ib_mad.h>
43
44#include "ehca_classes.h"
45#include "ehca_tools.h"
46#include "ehca_iverbs.h"
47#include "hcp_if.h"
48
49#define IB_MAD_STATUS_REDIRECT cpu_to_be16(0x0002)
50#define IB_MAD_STATUS_UNSUP_VERSION cpu_to_be16(0x0004)
51#define IB_MAD_STATUS_UNSUP_METHOD cpu_to_be16(0x0008)
52
53#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001)
54
55
56
57
58
59
60
61
62
63
64u64 ehca_define_sqp(struct ehca_shca *shca,
65 struct ehca_qp *ehca_qp,
66 struct ib_qp_init_attr *qp_init_attr)
67{
68 u32 pma_qp_nr, bma_qp_nr;
69 u64 ret;
70 u8 port = qp_init_attr->port_num;
71 int counter;
72
73 shca->sport[port - 1].port_state = IB_PORT_DOWN;
74
75 switch (qp_init_attr->qp_type) {
76 case IB_QPT_SMI:
77
78 break;
79 case IB_QPT_GSI:
80 ret = hipz_h_define_aqp1(shca->ipz_hca_handle,
81 ehca_qp->ipz_qp_handle,
82 ehca_qp->galpas.kernel,
83 (u32) qp_init_attr->port_num,
84 &pma_qp_nr, &bma_qp_nr);
85
86 if (ret != H_SUCCESS) {
87 ehca_err(&shca->ib_device,
88 "Can't define AQP1 for port %x. h_ret=%lli",
89 port, ret);
90 return ret;
91 }
92 shca->sport[port - 1].pma_qp_nr = pma_qp_nr;
93 ehca_dbg(&shca->ib_device, "port=%x pma_qp_nr=%x",
94 port, pma_qp_nr);
95 break;
96 default:
97 ehca_err(&shca->ib_device, "invalid qp_type=%x",
98 qp_init_attr->qp_type);
99 return H_PARAMETER;
100 }
101
102 if (ehca_nr_ports < 0)
103 return H_SUCCESS;
104
105 for (counter = 0;
106 shca->sport[port - 1].port_state != IB_PORT_ACTIVE &&
107 counter < ehca_port_act_time;
108 counter++) {
109 ehca_dbg(&shca->ib_device, "... wait until port %x is active",
110 port);
111 msleep_interruptible(1000);
112 }
113
114 if (counter == ehca_port_act_time) {
115 ehca_err(&shca->ib_device, "Port %x is not active.", port);
116 return H_HARDWARE;
117 }
118
119 return H_SUCCESS;
120}
121
/*
 * On-the-wire layout of a Performance Management class MAD:
 * the common MAD header followed by a reserved area and the
 * class-specific data payload (256 bytes total).
 */
struct ib_perf {
	struct ib_mad_hdr mad_hdr;
	u8 reserved[40];
	u8 data[192];
} __attribute__ ((packed));
127
128
/*
 * Overlay for the TrafficClass/SL/FlowLabel word of ClassPortInfo
 * (poi->redirect_tcslfl below).
 * NOTE(review): bitfield order assumes big-endian layout — ehca runs on
 * PowerPC only; confirm before reusing on other architectures.
 */
struct tcslfl {
	u32 tc:8;
	u32 sl:4;
	u32 fl:20;
} __attribute__ ((packed));
134
135
/*
 * Overlay for the Version/TrafficClass/FlowLabel word of a GRH
 * (in_grh->version_tclass_flow below).
 * NOTE(review): bitfield order assumes big-endian layout — ehca runs on
 * PowerPC only; confirm before reusing on other architectures.
 */
struct vertcfl {
	u32 ver:4;
	u32 tc:8;
	u32 fl:20;
} __attribute__ ((packed));
141
/*
 * Handle a Performance Management MAD by redirecting the sender to the
 * hardware PMA QP of this port (set up in ehca_define_sqp).
 *
 * The reply is built by copying the request into out_mad and then patching
 * it into a ClassPortInfo response with status REDIRECT, carrying the LID,
 * QP number, Q_Key, P_Key and GID of the redirection target.
 *
 * Returns IB_MAD_RESULT_FAILURE for unsolicited GetResp MADs, otherwise
 * IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY (a reply was generated).
 */
static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
			     struct ib_wc *in_wc, struct ib_grh *in_grh,
			     struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	/* reinterpret the generic MADs as PMA-class MADs */
	struct ib_perf *in_perf = (struct ib_perf *)in_mad;
	struct ib_perf *out_perf = (struct ib_perf *)out_mad;
	/* ClassPortInfo lives in the reply's class-specific data area */
	struct ib_class_port_info *poi =
		(struct ib_class_port_info *)out_perf->data;
	/* bitfield view of the TC/SL/FL redirection word */
	struct tcslfl *tcslfl =
		(struct tcslfl *)&poi->redirect_tcslfl;
	struct ehca_shca *shca =
		container_of(ibdev, struct ehca_shca, ib_device);
	struct ehca_sport *sport = &shca->sport[port_num - 1];

	ehca_dbg(ibdev, "method=%x", in_perf->mad_hdr.method);

	/* start from a copy of the request, then patch header/payload */
	*out_mad = *in_mad;

	if (in_perf->mad_hdr.class_version != 1) {
		ehca_warn(ibdev, "Unsupported class_version=%x",
			  in_perf->mad_hdr.class_version);
		out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_VERSION;
		goto perf_reply;
	}

	switch (in_perf->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
	case IB_MGMT_METHOD_SET:
		/* set class port info for redirection */
		out_perf->mad_hdr.attr_id = IB_PMA_CLASS_PORT_INFO;
		out_perf->mad_hdr.status = IB_MAD_STATUS_REDIRECT;
		memset(poi, 0, sizeof(*poi));
		poi->base_version = 1;
		poi->class_version = 1;
		poi->resp_time_value = 18;

		/* copy local routing info from WC where applicable */
		tcslfl->sl = in_wc->sl;
		/* LID of the redirection target on this port */
		poi->redirect_lid =
			sport->saved_attr.lid | in_wc->dlid_path_bits;
		poi->redirect_qp = sport->pma_qp_nr;
		poi->redirect_qkey = IB_QP1_QKEY;

		ehca_query_pkey(ibdev, port_num, in_wc->pkey_index,
				&poi->redirect_pkey);

		/* if request was globally routed, copy route info */
		if (in_grh) {
			struct vertcfl *vertcfl =
				(struct vertcfl *)&in_grh->version_tclass_flow;
			memcpy(poi->redirect_gid, in_grh->dgid.raw,
			       sizeof(poi->redirect_gid));
			tcslfl->tc = vertcfl->tc;
			tcslfl->fl = vertcfl->fl;
		} else
			/* else only fill in default GID */
			ehca_query_gid(ibdev, port_num, 0,
				       (union ib_gid *)&poi->redirect_gid);

		ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
			 sport->saved_attr.lid, sport->pma_qp_nr);
		break;

	case IB_MGMT_METHOD_GET_RESP:
		/* a response is never expected here — reject it */
		return IB_MAD_RESULT_FAILURE;

	default:
		out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_METHOD;
		break;
	}

perf_reply:
	out_perf->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
218
219int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
220 struct ib_wc *in_wc, struct ib_grh *in_grh,
221 struct ib_mad *in_mad, struct ib_mad *out_mad)
222{
223 int ret;
224
225 if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
226 return IB_MAD_RESULT_FAILURE;
227
228
229 if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
230 return IB_MAD_RESULT_SUCCESS;
231
232 ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
233 ret = ehca_process_perf(ibdev, port_num, in_wc, in_grh,
234 in_mad, out_mad);
235
236 return ret;
237}
238