#include <asm/hvcall.h>
#include "ehca_tools.h"
#include "hcp_if.h"
#include "hcp_phyp.h"
#include "hipz_fns.h"
#include "ipz_pt_fn.h"

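/*
 * Bit-field layouts used to build and decode the hcall register arguments
 * below; EHCA_BMASK_IBM() uses IBM bit numbering (bit 0 is the MSB of the
 * 64-bit register).
 */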
#define H_ALL_RES_QP_ENHANCED_OPS EHCA_BMASK_IBM(9, 11)
#define H_ALL_RES_QP_PTE_PIN EHCA_BMASK_IBM(12, 12)
#define H_ALL_RES_QP_SERVICE_TYPE EHCA_BMASK_IBM(13, 15)
#define H_ALL_RES_QP_STORAGE EHCA_BMASK_IBM(16, 17)
#define H_ALL_RES_QP_LL_RQ_CQE_POSTING EHCA_BMASK_IBM(18, 18)
#define H_ALL_RES_QP_LL_SQ_CQE_POSTING EHCA_BMASK_IBM(19, 21)
#define H_ALL_RES_QP_SIGNALING_TYPE EHCA_BMASK_IBM(22, 23)
#define H_ALL_RES_QP_UD_AV_LKEY_CTRL EHCA_BMASK_IBM(31, 31)
#define H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE EHCA_BMASK_IBM(32, 35)
#define H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE EHCA_BMASK_IBM(36, 39)
#define H_ALL_RES_QP_RESOURCE_TYPE EHCA_BMASK_IBM(56, 63)

#define H_ALL_RES_QP_MAX_OUTST_SEND_WR EHCA_BMASK_IBM(0, 15)
#define H_ALL_RES_QP_MAX_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_MAX_SEND_SGE EHCA_BMASK_IBM(32, 39)
#define H_ALL_RES_QP_MAX_RECV_SGE EHCA_BMASK_IBM(40, 47)

#define H_ALL_RES_QP_UD_AV_LKEY EHCA_BMASK_IBM(32, 63)
#define H_ALL_RES_QP_SRQ_QP_TOKEN EHCA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_SRQ_QP_HANDLE EHCA_BMASK_IBM(0, 64)
#define H_ALL_RES_QP_SRQ_LIMIT EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_SRQ_QPN EHCA_BMASK_IBM(40, 63)

#define H_ALL_RES_QP_ACT_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_ACT_OUTST_RECV_WR EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_ACT_SEND_SGE EHCA_BMASK_IBM(8, 15)
#define H_ALL_RES_QP_ACT_RECV_SGE EHCA_BMASK_IBM(24, 31)

#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES EHCA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES EHCA_BMASK_IBM(32, 63)

#define H_MP_INIT_TYPE EHCA_BMASK_IBM(44, 47)
#define H_MP_SHUTDOWN EHCA_BMASK_IBM(48, 48)
#define H_MP_RESET_QKEY_CTR EHCA_BMASK_IBM(49, 49)

#define HCALL4_REGS_FORMAT "r4=%lx r5=%lx r6=%lx r7=%lx"
#define HCALL7_REGS_FORMAT HCALL4_REGS_FORMAT " r8=%lx r9=%lx r10=%lx"
#define HCALL9_REGS_FORMAT HCALL7_REGS_FORMAT " r11=%lx r12=%lx"

static DEFINE_SPINLOCK(hcall_lock);

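/* Map an H_LONG_BUSY_* return code to the suggested retry delay in msecs. */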
static u32 get_longbusy_msecs(int longbusy_rc)
{
	switch (longbusy_rc) {
	case H_LONG_BUSY_ORDER_1_MSEC:
		return 1;
	case H_LONG_BUSY_ORDER_10_MSEC:
		return 10;
	case H_LONG_BUSY_ORDER_100_MSEC:
		return 100;
	case H_LONG_BUSY_ORDER_1_SEC:
		return 1000;
	case H_LONG_BUSY_ORDER_10_SEC:
		return 10000;
	case H_LONG_BUSY_ORDER_100_SEC:
		return 100000;
	default:
		return 1;
	}
}

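/*
 * Wrapper around plpar_hcall_norets(): optionally serializes hcalls via
 * hcall_lock and retries up to five times when the hypervisor reports a
 * long-busy condition, sleeping for the hinted interval in between.
 */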
static long ehca_plpar_hcall_norets(unsigned long opcode,
				    unsigned long arg1,
				    unsigned long arg2,
				    unsigned long arg3,
				    unsigned long arg4,
				    unsigned long arg5,
				    unsigned long arg6,
				    unsigned long arg7)
{
	long ret;
	int i, sleep_msecs;
	unsigned long flags = 0;

	if (unlikely(ehca_debug_level >= 2))
		ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
			     opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);

	for (i = 0; i < 5; i++) {
		if (ehca_lock_hcalls)
			spin_lock_irqsave(&hcall_lock, flags);

		ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
					 arg5, arg6, arg7);

		if (ehca_lock_hcalls)
			spin_unlock_irqrestore(&hcall_lock, flags);

		if (H_IS_LONG_BUSY(ret)) {
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}

		if (ret < H_SUCCESS)
			ehca_gen_err("opcode=%lx ret=%li " HCALL7_REGS_FORMAT,
				     opcode, ret, arg1, arg2, arg3,
				     arg4, arg5, arg6, arg7);
		else if (unlikely(ehca_debug_level >= 2))
			ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);

		return ret;
	}

	return H_BUSY;
}

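/*
 * Same retry and serialization logic as ehca_plpar_hcall_norets(), but for
 * hcalls that return up to nine output registers in outs[].
 */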
static long ehca_plpar_hcall9(unsigned long opcode,
			      unsigned long *outs,
			      unsigned long arg1,
			      unsigned long arg2,
			      unsigned long arg3,
			      unsigned long arg4,
			      unsigned long arg5,
			      unsigned long arg6,
			      unsigned long arg7,
			      unsigned long arg8,
			      unsigned long arg9)
{
	long ret;
	int i, sleep_msecs;
	unsigned long flags = 0;

	if (unlikely(ehca_debug_level >= 2))
		ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
			     arg1, arg2, arg3, arg4, arg5,
			     arg6, arg7, arg8, arg9);

	for (i = 0; i < 5; i++) {
		if (ehca_lock_hcalls)
			spin_lock_irqsave(&hcall_lock, flags);

		ret = plpar_hcall9(opcode, outs,
				   arg1, arg2, arg3, arg4, arg5,
				   arg6, arg7, arg8, arg9);

		if (ehca_lock_hcalls)
			spin_unlock_irqrestore(&hcall_lock, flags);

		if (H_IS_LONG_BUSY(ret)) {
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}

		if (ret < H_SUCCESS) {
			ehca_gen_err("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT,
				     opcode, arg1, arg2, arg3, arg4, arg5,
				     arg6, arg7, arg8, arg9);
			ehca_gen_err("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
				     ret, outs[0], outs[1], outs[2], outs[3],
				     outs[4], outs[5], outs[6], outs[7],
				     outs[8]);
		} else if (unlikely(ehca_debug_level >= 2))
			ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
				     ret, outs[0], outs[1], outs[2], outs[3],
				     outs[4], outs[5], outs[6], outs[7],
				     outs[8]);
		return ret;
	}

	return H_BUSY;
}

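/*
 * Allocate an event queue via H_ALLOC_RESOURCE and return its handle, the
 * actual number of entries and pages, and the interrupt source token (IST).
 */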
u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_pfeq *pfeq,
			     const u32 neq_control,
			     const u32 number_of_entries,
			     struct ipz_eq_handle *eq_handle,
			     u32 *act_nr_of_entries,
			     u32 *act_pages,
			     u32 *eq_ist)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	u64 allocate_controls;

	/* resource type: event queue */
	allocate_controls = 3ULL;

	if (neq_control != 1)
		allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
	else /* notification event queue */
		allocate_controls = (1ULL << 63) | allocate_controls;

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle,
				allocate_controls,
				number_of_entries,
				0, 0, 0, 0, 0, 0);
	eq_handle->handle = outs[0];
	*act_nr_of_entries = (u32)outs[3];
	*act_pages = (u32)outs[4];
	*eq_ist = (u32)outs[5];

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resource - ret=%lli ", ret);

	return ret;
}

u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
		       struct ipz_eq_handle eq_handle,
		       const u64 event_mask)
{
	return ehca_plpar_hcall_norets(H_RESET_EVENTS,
				       adapter_handle.handle,
				       eq_handle.handle,
				       event_mask,
				       0, 0, 0, 0);
}

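/*
 * Allocate a completion queue and set up access to its hardware register
 * pages (galpas); the resource is freed again if that setup fails.
 */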
u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_cq *cq,
			     struct ehca_alloc_cq_parms *param)
{
	int rc;
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle,
				2,
				param->eq_handle.handle,
				cq->token,
				param->nr_cqe,
				0, 0, 0, 0);
	cq->ipz_cq_handle.handle = outs[0];
	param->act_nr_of_entries = (u32)outs[3];
	param->act_pages = (u32)outs[4];

	if (ret == H_SUCCESS) {
		rc = hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
		if (rc) {
			ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
				     rc, outs[5]);

			ehca_plpar_hcall_norets(H_FREE_RESOURCE,
						adapter_handle.handle,
						cq->ipz_cq_handle.handle,
						0, 0, 0, 0, 0);
			ret = H_NO_MEM;
		}
	}

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%lli", ret);

	return ret;
}

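/*
 * Allocate a queue pair (or SRQ, depending on parms->ext_type): the requested
 * capabilities are packed into the hcall registers, and the actually granted
 * values are returned in parms.
 */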
u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_alloc_qp_parms *parms, int is_user)
{
	int rc;
	u64 ret;
	u64 allocate_controls, max_r10_reg, r11, r12;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	allocate_controls =
		EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
		| EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
		| EHCA_BMASK_SET(H_ALL_RES_QP_STORAGE, parms->qp_storage)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE,
				 parms->squeue.page_size)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE,
				 parms->rqueue.page_size)
		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
				 !!(parms->ll_comp_flags & LLQP_RECV_COMP))
		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
				 !!(parms->ll_comp_flags & LLQP_SEND_COMP))
		| EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
				 parms->ud_av_l_key_ctl)
		| EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);

	max_r10_reg =
		EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
			       parms->squeue.max_wr + 1)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
				 parms->rqueue.max_wr + 1)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
				 parms->squeue.max_sge)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
				 parms->rqueue.max_sge);

	r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);

	if (parms->ext_type == EQPT_SRQ)
		r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
	else
		r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle,
				allocate_controls,
				parms->send_cq_handle.handle,
				parms->recv_cq_handle.handle,
				parms->eq_handle.handle,
				((u64)parms->token << 32) | parms->pd.value,
				max_r10_reg, r11, r12);

	parms->qp_handle.handle = outs[0];
	parms->real_qp_num = (u32)outs[1];
	parms->squeue.act_nr_wqes =
		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
	parms->rqueue.act_nr_wqes =
		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
	parms->squeue.act_nr_sges =
		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
	parms->rqueue.act_nr_sges =
		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
	parms->squeue.queue_size =
		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
	parms->rqueue.queue_size =
		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);

	if (ret == H_SUCCESS) {
		rc = hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
		if (rc) {
			ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
				     rc, outs[6]);

			ehca_plpar_hcall_norets(H_FREE_RESOURCE,
						adapter_handle.handle,
						parms->qp_handle.handle,
						0, 0, 0, 0, 0);
			ret = H_NO_MEM;
		}
	}

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%lli", ret);

	return ret;
}

u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
		      const u8 port_id,
		      struct hipz_query_port *query_port_response_block)
{
	u64 ret;
	u64 r_cb = virt_to_abs(query_port_response_block);

	if (r_cb & (EHCA_PAGESIZE-1)) {
		ehca_gen_err("response block not page aligned");
		return H_PARAMETER;
	}

	ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
				      adapter_handle.handle,
				      port_id,
				      r_cb,
				      0, 0, 0, 0);

	if (ehca_debug_level >= 2)
		ehca_dmp(query_port_response_block, 64, "response_block");

	return ret;
}

u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
		       const u8 port_id, const u32 port_cap,
		       const u8 init_type, const int modify_mask)
{
	u64 port_attributes = port_cap;

	if (modify_mask & IB_PORT_SHUTDOWN)
		port_attributes |= EHCA_BMASK_SET(H_MP_SHUTDOWN, 1);
	if (modify_mask & IB_PORT_INIT_TYPE)
		port_attributes |= EHCA_BMASK_SET(H_MP_INIT_TYPE, init_type);
	if (modify_mask & IB_PORT_RESET_QKEY_CNTR)
		port_attributes |= EHCA_BMASK_SET(H_MP_RESET_QKEY_CTR, 1);

	return ehca_plpar_hcall_norets(H_MODIFY_PORT,
				       adapter_handle.handle,
				       port_id,
				       port_attributes,
				       0, 0, 0, 0);
}

u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
		     struct hipz_query_hca *query_hca_rblock)
{
	u64 r_cb = virt_to_abs(query_hca_rblock);

	if (r_cb & (EHCA_PAGESIZE-1)) {
		ehca_gen_err("response_block=%p not page aligned",
			     query_hca_rblock);
		return H_PARAMETER;
	}

	return ehca_plpar_hcall_norets(H_QUERY_HCA,
				       adapter_handle.handle,
				       r_cb,
				       0, 0, 0, 0, 0);
}

u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
			  const u8 pagesize,
			  const u8 queue_type,
			  const u64 resource_handle,
			  const u64 logical_address_of_page,
			  u64 count)
{
	return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
				       adapter_handle.handle,
				       (u64)queue_type | ((u64)pagesize) << 8,
				       resource_handle,
				       logical_address_of_page,
				       count,
				       0, 0);
}

u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
			     const struct ipz_eq_handle eq_handle,
			     struct ehca_pfeq *pfeq,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count)
{
	if (count != 1) {
		ehca_gen_err("Page counter=%llx", count);
		return H_PARAMETER;
	}
	return hipz_h_register_rpage(adapter_handle,
				     pagesize,
				     queue_type,
				     eq_handle.handle,
				     logical_address_of_page, count);
}

u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
			   u32 ist)
{
	u64 ret;
	ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
				      adapter_handle.handle,
				      ist,
				      0, 0, 0, 0, 0);

	if (ret != H_SUCCESS && ret != H_BUSY)
		ehca_gen_err("Could not query interrupt state.");

	return ret;
}

u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
			     const struct ipz_cq_handle cq_handle,
			     struct ehca_pfcq *pfcq,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count,
			     const struct h_galpa gal)
{
	if (count != 1) {
		ehca_gen_err("Page counter=%llx", count);
		return H_PARAMETER;
	}

	return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
				     cq_handle.handle, logical_address_of_page,
				     count);
}

u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
			     const struct ipz_qp_handle qp_handle,
			     struct ehca_pfqp *pfqp,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count,
			     const struct h_galpa galpa)
{
	if (count > 1) {
		ehca_gen_err("Page counter=%llx", count);
		return H_PARAMETER;
	}

	return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
				     qp_handle.handle, logical_address_of_page,
				     count);
}

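/*
 * Disable a QP and retrieve the logical addresses of the next send and
 * receive WQEs to be processed, as selected by the function code.
 */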
u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
			       const struct ipz_qp_handle qp_handle,
			       struct ehca_pfqp *pfqp,
			       void **log_addr_next_sq_wqe2processed,
			       void **log_addr_next_rq_wqe2processed,
			       int dis_and_get_function_code)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
				adapter_handle.handle,
				dis_and_get_function_code,
				qp_handle.handle,
				0, 0, 0, 0, 0, 0);
	if (log_addr_next_sq_wqe2processed)
		*log_addr_next_sq_wqe2processed = (void *)outs[0];
	if (log_addr_next_rq_wqe2processed)
		*log_addr_next_rq_wqe2processed = (void *)outs[1];

	return ret;
}

u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
		     const struct ipz_qp_handle qp_handle,
		     struct ehca_pfqp *pfqp,
		     const u64 update_mask,
		     struct hcp_modify_qp_control_block *mqpcb,
		     struct h_galpa gal)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
				adapter_handle.handle,
				qp_handle.handle,
				update_mask,
				virt_to_abs(mqpcb),
				0, 0, 0, 0, 0);

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Insufficient resources ret=%lli", ret);

	return ret;
}

u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
		    const struct ipz_qp_handle qp_handle,
		    struct ehca_pfqp *pfqp,
		    struct hcp_modify_qp_control_block *qqpcb,
		    struct h_galpa gal)
{
	return ehca_plpar_hcall_norets(H_QUERY_QP,
				       adapter_handle.handle,
				       qp_handle.handle,
				       virt_to_abs(qqpcb),
				       0, 0, 0, 0);
}

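/*
 * Destroy a QP: tear down register access, disable the QP via
 * H_DISABLE_AND_GETC, then free the resource.
 */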
u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
		      struct ehca_qp *qp)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = hcp_galpas_dtor(&qp->galpas);
	if (ret) {
		ehca_gen_err("Could not destruct qp->galpas");
		return H_RESOURCE;
	}
	ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
				adapter_handle.handle,
				/* function code */
				1,
				qp->ipz_qp_handle.handle,
				0, 0, 0, 0, 0, 0);
	if (ret == H_HARDWARE)
		ehca_gen_err("HCA not operational. ret=%lli", ret);

	ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				      adapter_handle.handle,
				      qp->ipz_qp_handle.handle,
				      0, 0, 0, 0, 0);

	if (ret == H_RESOURCE)
		ehca_gen_err("Resource still in use. ret=%lli", ret);

	return ret;
}

u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u32 port)
{
	return ehca_plpar_hcall_norets(H_DEFINE_AQP0,
				       adapter_handle.handle,
				       qp_handle.handle,
				       port,
				       0, 0, 0, 0);
}

u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u32 port, u32 *pma_qp_nr,
		       u32 *bma_qp_nr)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
				adapter_handle.handle,
				qp_handle.handle,
				port,
				0, 0, 0, 0, 0, 0);
	*pma_qp_nr = (u32)outs[0];
	*bma_qp_nr = (u32)outs[1];

	if (ret == H_ALIAS_EXIST)
		ehca_gen_err("AQP1 already exists. ret=%lli", ret);

	return ret;
}

u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u16 mcg_dlid,
		       u64 subnet_prefix, u64 interface_id)
{
	u64 ret;

	ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
				      adapter_handle.handle,
				      qp_handle.handle,
				      mcg_dlid,
				      interface_id,
				      subnet_prefix,
				      0, 0);

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%lli", ret);

	return ret;
}

u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u16 mcg_dlid,
		       u64 subnet_prefix, u64 interface_id)
{
	return ehca_plpar_hcall_norets(H_DETACH_MCQP,
				       adapter_handle.handle,
				       qp_handle.handle,
				       mcg_dlid,
				       interface_id,
				       subnet_prefix,
				       0, 0);
}

u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
		      struct ehca_cq *cq,
		      u8 force_flag)
{
	u64 ret;

	ret = hcp_galpas_dtor(&cq->galpas);
	if (ret) {
		ehca_gen_err("Could not destruct cq->galpas");
		return H_RESOURCE;
	}

	ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				      adapter_handle.handle,
				      cq->ipz_cq_handle.handle,
				      force_flag != 0 ? 1L : 0L,
				      0, 0, 0, 0);

	if (ret == H_RESOURCE)
		ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret);

	return ret;
}

u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
		      struct ehca_eq *eq)
{
	u64 ret;

	ret = hcp_galpas_dtor(&eq->galpas);
	if (ret) {
		ehca_gen_err("Could not destruct eq->galpas");
		return H_RESOURCE;
	}

	ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				      adapter_handle.handle,
				      eq->ipz_eq_handle.handle,
				      0, 0, 0, 0, 0);

	if (ret == H_RESOURCE)
		ehca_gen_err("Resource in use. ret=%lli ", ret);

	return ret;
}

u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mr *mr,
			     const u64 vaddr,
			     const u64 length,
			     const u32 access_ctrl,
			     const struct ipz_pd pd,
			     struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle,
				5,
				vaddr,
				length,
				(((u64)access_ctrl) << 32ULL),
				pd.value,
				0, 0, 0);
	outparms->handle.handle = outs[0];
	outparms->lkey = (u32)outs[2];
	outparms->rkey = (u32)outs[3];

	return ret;
}

u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mr *mr,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count)
{
	u64 ret;

	if (unlikely(ehca_debug_level >= 3)) {
		if (count > 1) {
			u64 *kpage;
			int i;
			kpage = (u64 *)abs_to_virt(logical_address_of_page);
			for (i = 0; i < count; i++)
				ehca_gen_dbg("kpage[%d]=%p",
					     i, (void *)kpage[i]);
		} else
			ehca_gen_dbg("kpage=%p",
				     (void *)logical_address_of_page);
	}

	if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
		ehca_gen_err("logical_address_of_page not on a 4k boundary "
			     "adapter_handle=%llx mr=%p mr_handle=%llx "
			     "pagesize=%x queue_type=%x "
			     "logical_address_of_page=%llx count=%llx",
			     adapter_handle.handle, mr,
			     mr->ipz_mr_handle.handle, pagesize, queue_type,
			     logical_address_of_page, count);
		ret = H_PARAMETER;
	} else
		ret = hipz_h_register_rpage(adapter_handle, pagesize,
					    queue_type,
					    mr->ipz_mr_handle.handle,
					    logical_address_of_page, count);
	return ret;
}

u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
		    const struct ehca_mr *mr,
		    struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
				adapter_handle.handle,
				mr->ipz_mr_handle.handle,
				0, 0, 0, 0, 0, 0, 0);
	outparms->len = outs[0];
	outparms->vaddr = outs[1];
	outparms->acl = outs[4] >> 32;
	outparms->lkey = (u32)(outs[5] >> 32);
	outparms->rkey = (u32)(outs[5] & (0xffffffff));

	return ret;
}

u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
			    const struct ehca_mr *mr)
{
	return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				       adapter_handle.handle,
				       mr->ipz_mr_handle.handle,
				       0, 0, 0, 0, 0);
}

u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
			  const struct ehca_mr *mr,
			  const u64 vaddr_in,
			  const u64 length,
			  const u32 access_ctrl,
			  const struct ipz_pd pd,
			  const u64 mr_addr_cb,
			  struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
				adapter_handle.handle,
				mr->ipz_mr_handle.handle,
				vaddr_in,
				length,
				((((u64)access_ctrl) << 32ULL) | pd.value),
				mr_addr_cb,
				0, 0, 0);
	outparms->vaddr = outs[1];
	outparms->lkey = (u32)outs[2];
	outparms->rkey = (u32)outs[3];

	return ret;
}

u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
			const struct ehca_mr *mr,
			const struct ehca_mr *orig_mr,
			const u64 vaddr_in,
			const u32 access_ctrl,
			const struct ipz_pd pd,
			struct ehca_mr_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
				adapter_handle.handle,
				orig_mr->ipz_mr_handle.handle,
				vaddr_in,
				(((u64)access_ctrl) << 32ULL),
				pd.value,
				0, 0, 0, 0);
	outparms->handle.handle = outs[0];
	outparms->lkey = (u32)outs[2];
	outparms->rkey = (u32)outs[3];

	return ret;
}

u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mw *mw,
			     const struct ipz_pd pd,
			     struct ehca_mw_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle,
				6,
				pd.value,
				0, 0, 0, 0, 0, 0);
	outparms->handle.handle = outs[0];
	outparms->rkey = (u32)outs[3];

	return ret;
}

u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
		    const struct ehca_mw *mw,
		    struct ehca_mw_hipzout_parms *outparms)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
				adapter_handle.handle,
				mw->ipz_mw_handle.handle,
				0, 0, 0, 0, 0, 0, 0);
	outparms->rkey = (u32)outs[3];

	return ret;
}

u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
			    const struct ehca_mw *mw)
{
	return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				       adapter_handle.handle,
				       mw->ipz_mw_handle.handle,
				       0, 0, 0, 0, 0);
}

u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
		      const u64 resource_handle,
		      void *rblock,
		      unsigned long *byte_count)
{
	u64 r_cb = virt_to_abs(rblock);

	if (r_cb & (EHCA_PAGESIZE-1)) {
		ehca_gen_err("rblock not page aligned.");
		return H_PARAMETER;
	}

	return ehca_plpar_hcall_norets(H_ERROR_DATA,
				       adapter_handle.handle,
				       resource_handle,
				       r_cb,
				       0, 0, 0, 0);
}

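/*
 * Signal end-of-interrupt for the given interrupt source via H_EOI. The low
 * 24 bits of the XIRR value carry the interrupt source number; the top byte
 * holds the processor priority to restore (0xff accepts all interrupts).
 */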
u64 hipz_h_eoi(int irq)
{
	unsigned long xirr;

	iosync();
	xirr = (0xffULL << 24) | irq;

	return plpar_hcall_norets(H_EOI, xirr);
}
970