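/*
 * eHCA InfiniBand device driver: interrupt handling for the event queue
 * (EQ) and the notification event queue (NEQ), plus dispatch of completion
 * and asynchronous events to the verbs consumers.
 */
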
#include "ehca_classes.h"
#include "ehca_irq.h"
#include "ehca_iverbs.h"
#include "ehca_tools.h"
#include "hcp_if.h"
#include "hipz_fns.h"
#include "ipz_pt_fn.h"

#define EQE_COMPLETION_EVENT   EHCA_BMASK_IBM( 1,  1)
#define EQE_CQ_QP_NUMBER       EHCA_BMASK_IBM( 8, 31)
#define EQE_EE_IDENTIFIER      EHCA_BMASK_IBM( 2,  7)
#define EQE_CQ_NUMBER          EHCA_BMASK_IBM( 8, 31)
#define EQE_QP_NUMBER          EHCA_BMASK_IBM( 8, 31)
#define EQE_QP_TOKEN           EHCA_BMASK_IBM(32, 63)
#define EQE_CQ_TOKEN           EHCA_BMASK_IBM(32, 63)

#define NEQE_COMPLETION_EVENT  EHCA_BMASK_IBM( 1,  1)
#define NEQE_EVENT_CODE        EHCA_BMASK_IBM( 2,  7)
#define NEQE_PORT_NUMBER       EHCA_BMASK_IBM( 8, 15)
#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
#define NEQE_DISRUPTIVE        EHCA_BMASK_IBM(16, 16)
#define NEQE_SPECIFIC_EVENT    EHCA_BMASK_IBM(16, 23)

#define ERROR_DATA_LENGTH      EHCA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE        EHCA_BMASK_IBM( 0,  7)

static void queue_comp_task(struct ehca_cq *__cq);

static struct ehca_comp_pool *pool;

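/* Invoke the consumer's completion handler for a CQ, serialized by cb_lock. */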
static inline void comp_event_callback(struct ehca_cq *cq)
{
	if (!cq->ib_cq.comp_handler)
		return;

	spin_lock(&cq->cb_lock);
	cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context);
	spin_unlock(&cq->cb_lock);

	return;
}

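/*
 * Log the error-data block returned by the hypervisor for a QP or CQ
 * resource; unknown resource types are reported as such.
 */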
static void print_error_data(struct ehca_shca *shca, void *data,
			     u64 *rblock, int length)
{
	u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
	u64 resource = rblock[1];

	switch (type) {
	case 0x1: /* Queue Pair */
	{
		struct ehca_qp *qp = (struct ehca_qp *)data;

		/* skip the report if no error indication is present */
		if (rblock[6] == 0)
			return;

		ehca_err(&shca->ib_device,
			 "QP 0x%x (resource=%llx) has errors.",
			 qp->ib_qp.qp_num, resource);
		break;
	}
	case 0x4: /* Completion Queue */
	{
		struct ehca_cq *cq = (struct ehca_cq *)data;

		ehca_err(&shca->ib_device,
			 "CQ 0x%x (resource=%llx) has errors.",
			 cq->cq_number, resource);
		break;
	}
	default:
		ehca_err(&shca->ib_device,
			 "Unknown error type: %llx on %s.",
			 type, shca->ib_device.name);
		break;
	}

	ehca_err(&shca->ib_device, "Error data is available: %llx.", resource);
	ehca_err(&shca->ib_device, "EHCA ----- error data begin "
		 "---------------------------------------------------");
	ehca_dmp(rblock, length, "resource=%llx", resource);
	ehca_err(&shca->ib_device, "EHCA ----- error data end "
		 "----------------------------------------------------");

	return;
}

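/*
 * Fetch the error data for a resource handle from the hypervisor and dump
 * it to the kernel log. Returns the hcall status, or -ENOMEM if no control
 * block could be allocated.
 */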
int ehca_error_data(struct ehca_shca *shca, void *data,
		    u64 resource)
{
	unsigned long ret;
	u64 *rblock;
	unsigned long block_count;

	rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
	if (!rblock) {
		ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
		ret = -ENOMEM;
		goto error_data1;
	}

	/* fetch up to one page (EHCA_PAGESIZE) of error data into rblock */
	ret = hipz_h_error_data(shca->ipz_hca_handle,
				resource,
				rblock,
				&block_count);

	if (ret == H_R_STATE)
		ehca_err(&shca->ib_device,
			 "No error data is available: %llx.", resource);
	else if (ret == H_SUCCESS) {
		int length;

		length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]);

		if (length > EHCA_PAGESIZE)
			length = EHCA_PAGESIZE;

		print_error_data(shca, data, rblock, length);
	} else
		ehca_err(&shca->ib_device,
			 "Error data could not be fetched: %llx", resource);

	ehca_free_fw_ctrlblock(rblock);

error_data1:
	return ret;

}

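/*
 * Forward an asynchronous event to the consumer's QP or SRQ event handler,
 * if one is registered.
 */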
static void dispatch_qp_event(struct ehca_shca *shca, struct ehca_qp *qp,
			      enum ib_event_type event_type)
{
	struct ib_event event;

	/* suppress PATH_MIG events unless migration was actually armed */
	if (event_type == IB_EVENT_PATH_MIG && !qp->mig_armed)
		return;

	event.device = &shca->ib_device;
	event.event = event_type;

	if (qp->ext_type == EQPT_SRQ) {
		if (!qp->ib_srq.event_handler)
			return;

		event.element.srq = &qp->ib_srq;
		qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);
	} else {
		if (!qp->ib_qp.event_handler)
			return;

		event.element.qp = &qp->ib_qp;
		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
	}
}

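/*
 * Look up the QP by the token in the EQE, optionally dump its error data,
 * and forward the event. The nr_events counter keeps the QP from being
 * freed while events are still in flight.
 */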
static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
			      enum ib_event_type event_type, int fatal)
{
	struct ehca_qp *qp;
	u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);

	read_lock(&ehca_qp_idr_lock);
	qp = idr_find(&ehca_qp_idr, token);
	if (qp)
		atomic_inc(&qp->nr_events);
	read_unlock(&ehca_qp_idr_lock);

	if (!qp)
		return;

	if (fatal)
		ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);

	dispatch_qp_event(shca, qp, fatal && qp->ext_type == EQPT_SRQ ?
			  IB_EVENT_SRQ_ERR : event_type);

	/*
	 * For QPs attached to an SRQ, a fatal error also means the last WQE
	 * has been reached, so report that to the consumer as well.
	 */
	if (fatal && qp->ext_type == EQPT_SRQBASE)
		dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED);

	if (atomic_dec_and_test(&qp->nr_events))
		wake_up(&qp->wait_completion);
	return;
}

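/* Look up the CQ by the token in the EQE and dump its error data. */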
static void cq_event_callback(struct ehca_shca *shca,
			      u64 eqe)
{
	struct ehca_cq *cq;
	u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);

	read_lock(&ehca_cq_idr_lock);
	cq = idr_find(&ehca_cq_idr, token);
	if (cq)
		atomic_inc(&cq->nr_events);
	read_unlock(&ehca_cq_idr_lock);

	if (!cq)
		return;

	ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);

	if (atomic_dec_and_test(&cq->nr_events))
		wake_up(&cq->wait_completion);

	return;
}

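/*
 * Decode the event identifier of an asynchronous EQE and dispatch it to the
 * QP/CQ callbacks above or log it.
 */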
static void parse_identifier(struct ehca_shca *shca, u64 eqe)
{
	u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);

	switch (identifier) {
	case 0x02:
		qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG, 0);
		break;
	case 0x03:
		qp_event_callback(shca, eqe, IB_EVENT_COMM_EST, 0);
		break;
	case 0x04:
		qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED, 0);
		break;
	case 0x05:
	case 0x06:
		qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL, 1);
		break;
	case 0x07:
	case 0x08:
		cq_event_callback(shca, eqe);
		break;
	case 0x09:
		ehca_err(&shca->ib_device, "MRMWPTE error.");
		break;
	case 0x0A:
		ehca_err(&shca->ib_device, "Port event.");
		break;
	case 0x0B:
		ehca_err(&shca->ib_device, "MR access error.");
		break;
	case 0x0C:
		ehca_err(&shca->ib_device, "EQ error.");
		break;
	case 0x0D:
		ehca_err(&shca->ib_device, "P/Q_Key mismatch.");
		break;
	case 0x10:
		ehca_err(&shca->ib_device, "Sampling complete.");
		break;
	case 0x11:
		ehca_err(&shca->ib_device, "Unaffiliated access error.");
		break;
	case 0x12:
		ehca_err(&shca->ib_device, "Path migrating.");
		break;
	case 0x13:
		ehca_err(&shca->ib_device, "Interface trace stopped.");
		break;
	case 0x14:
		ehca_info(&shca->ib_device, "First error capture available");
		break;
	case 0x15:
		qp_event_callback(shca, eqe, IB_EVENT_SRQ_LIMIT_REACHED, 0);
		break;
	default:
		ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.",
			 identifier, shca->ib_device.name);
		break;
	}

	return;
}

static void dispatch_port_event(struct ehca_shca *shca, int port_num,
				enum ib_event_type type, const char *msg)
{
	struct ib_event event;

	ehca_info(&shca->ib_device, "port %d %s.", port_num, msg);
	event.device = &shca->ib_device;
	event.event = type;
	event.element.port_num = port_num;
	ib_dispatch_event(&event);
}

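/*
 * Compare the port's current SMA attributes against the saved copy and emit
 * SM_CHANGE, LID_CHANGE and/or PKEY_CHANGE events for whatever differs.
 */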
static void notify_port_conf_change(struct ehca_shca *shca, int port_num)
{
	struct ehca_sma_attr new_attr;
	struct ehca_sma_attr *old_attr = &shca->sport[port_num - 1].saved_attr;

	ehca_query_sma_attr(shca, port_num, &new_attr);

	if (new_attr.sm_sl != old_attr->sm_sl ||
	    new_attr.sm_lid != old_attr->sm_lid)
		dispatch_port_event(shca, port_num, IB_EVENT_SM_CHANGE,
				    "SM changed");

	if (new_attr.lid != old_attr->lid ||
	    new_attr.lmc != old_attr->lmc)
		dispatch_port_event(shca, port_num, IB_EVENT_LID_CHANGE,
				    "LID changed");

	if (new_attr.pkey_tbl_len != old_attr->pkey_tbl_len ||
	    memcmp(new_attr.pkeys, old_attr->pkeys,
		   sizeof(u16) * new_attr.pkey_tbl_len))
		dispatch_port_event(shca, port_num, IB_EVENT_PKEY_CHANGE,
				    "P_Key changed");

	*old_attr = new_attr;
}

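/*
 * Recover the port's special QPs (SMI and, if it still exists, the GSI QP,
 * i.e. AQP1) after a link event. Returns nonzero if AQP1 had already been
 * destroyed, in which case the caller drops the port-active report.
 */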
static int replay_modify_qp(struct ehca_sport *sport)
{
	int aqp1_destroyed;
	unsigned long flags;

	spin_lock_irqsave(&sport->mod_sqp_lock, flags);

	aqp1_destroyed = !sport->ibqp_sqp[IB_QPT_GSI];

	if (sport->ibqp_sqp[IB_QPT_SMI])
		ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
	if (!aqp1_destroyed)
		ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);

	spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);

	return aqp1_destroyed;
}

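/*
 * Decode a notification event queue entry: port availability and
 * configuration changes, adapter malfunction, trace stop and client
 * reregistration requests.
 */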
static void parse_ec(struct ehca_shca *shca, u64 eqe)
{
	u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
	u8 spec_event;
	struct ehca_sport *sport = &shca->sport[port - 1];

	switch (ec) {
	case 0x30: /* port availability change */
		if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
			/*
			 * Replay modify_qp for the special QPs only in port
			 * auto-detect mode; if AQP1 no longer exists, skip
			 * the port-active report.
			 */
			if (ehca_nr_ports < 0)
				if (replay_modify_qp(sport))
					break;

			sport->port_state = IB_PORT_ACTIVE;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
					    "is active");
			ehca_query_sma_attr(shca, port, &sport->saved_attr);
		} else {
			sport->port_state = IB_PORT_DOWN;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
					    "is inactive");
		}
		break;
	case 0x31:
		/*
		 * Port configuration change: a disruptive change is reported
		 * as port down followed by port up, otherwise the individual
		 * attribute changes are reported.
		 */
		if (EHCA_BMASK_GET(NEQE_DISRUPTIVE, eqe)) {
			ehca_warn(&shca->ib_device, "disruptive port "
				  "%d configuration change", port);

			sport->port_state = IB_PORT_DOWN;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
					    "is inactive");

			sport->port_state = IB_PORT_ACTIVE;
			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
					    "is active");
			ehca_query_sma_attr(shca, port,
					    &sport->saved_attr);
		} else
			notify_port_conf_change(shca, port);
		break;
	case 0x32:
		ehca_err(&shca->ib_device, "Adapter malfunction.");
		break;
	case 0x33:
		ehca_err(&shca->ib_device, "Trace stopped.");
		break;
	case 0x34: /* util async event */
		spec_event = EHCA_BMASK_GET(NEQE_SPECIFIC_EVENT, eqe);
		if (spec_event == 0x80) /* client reregister required */
			dispatch_port_event(shca, port,
					    IB_EVENT_CLIENT_REREGISTER,
					    "client reregister req.");
		else
			ehca_warn(&shca->ib_device, "Unknown util async "
				  "event %x on port %x", spec_event, port);
		break;
	default:
		ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
			 ec, shca->ib_device.name);
		break;
	}

	return;
}

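/*
 * Clear the EQ-pending bit in the CQ's register area so the adapter can
 * post further completion events for this CQ; the read-back presumably
 * serves to flush the MMIO store.
 */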
static inline void reset_eq_pending(struct ehca_cq *cq)
{
	u64 CQx_EP;
	struct h_galpa gal = cq->galpas.kernel;

	hipz_galpa_store_cq(gal, cqx_ep, 0x0);
	CQx_EP = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_ep));

	return;
}

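/*
 * NEQ handling: the hard interrupt only schedules a tasklet; the tasklet
 * drains the notification event queue, parses each event code and finally
 * acknowledges the events via hipz_h_reset_event().
 */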
irqreturn_t ehca_interrupt_neq(int irq, void *dev_id)
{
	struct ehca_shca *shca = (struct ehca_shca *)dev_id;

	tasklet_hi_schedule(&shca->neq.interrupt_task);

	return IRQ_HANDLED;
}

void ehca_tasklet_neq(unsigned long data)
{
	struct ehca_shca *shca = (struct ehca_shca *)data;
	struct ehca_eqe *eqe;
	u64 ret;

	eqe = ehca_poll_eq(shca, &shca->neq);

	while (eqe) {
		if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
			parse_ec(shca, eqe->entry);

		eqe = ehca_poll_eq(shca, &shca->neq);
	}

	ret = hipz_h_reset_event(shca->ipz_hca_handle,
				 shca->neq.ipz_eq_handle, 0xFFFFFFFFFFFFFFFFL);

	if (ret != H_SUCCESS)
		ehca_err(&shca->ib_device, "Can't clear notification events.");

	return;
}

irqreturn_t ehca_interrupt_eq(int irq, void *dev_id)
{
	struct ehca_shca *shca = (struct ehca_shca *)dev_id;

	tasklet_hi_schedule(&shca->eq.interrupt_task);

	return IRQ_HANDLED;
}

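/*
 * Handle a single EQE: completion events are routed to the CQ identified by
 * the token (either through the comp-task pool or by calling the handler
 * directly); everything else goes to parse_identifier().
 */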
static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
{
	u64 eqe_value;
	u32 token;
	struct ehca_cq *cq;

	eqe_value = eqe->entry;
	ehca_dbg(&shca->ib_device, "eqe_value=%llx", eqe_value);
	if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
		ehca_dbg(&shca->ib_device, "Got completion event");
		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
		read_lock(&ehca_cq_idr_lock);
		cq = idr_find(&ehca_cq_idr, token);
		if (cq)
			atomic_inc(&cq->nr_events);
		read_unlock(&ehca_cq_idr_lock);
		if (cq == NULL) {
			ehca_err(&shca->ib_device,
				 "Invalid eqe for non-existing cq token=%x",
				 token);
			return;
		}
		reset_eq_pending(cq);
		if (ehca_scaling_code)
			queue_comp_task(cq);
		else {
			comp_event_callback(cq);
			if (atomic_dec_and_test(&cq->nr_events))
				wake_up(&cq->wait_completion);
		}
	} else {
		ehca_dbg(&shca->ib_device, "Got non completion event");
		parse_identifier(shca, eqe_value);
	}
}

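/*
 * Drain the main event queue. In IRQ context the interrupt state is polled
 * until the adapter deasserts it; up to EHCA_EQE_CACHE_SIZE entries are read
 * and cached, their CQs pinned, the EQ-pending bits cleared, and the cached
 * completions dispatched. Entries that arrive in the meantime are processed
 * one by one afterwards.
 */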
void ehca_process_eq(struct ehca_shca *shca, int is_irq)
{
	struct ehca_eq *eq = &shca->eq;
	struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
	u64 eqe_value, ret;
	unsigned long flags;
	int eqe_cnt, i;
	int eq_empty = 0;

	spin_lock_irqsave(&eq->irq_spinlock, flags);
	if (is_irq) {
		const int max_query_cnt = 100;
		int query_cnt = 0;
		int int_state = 1;
		do {
			int_state = hipz_h_query_int_state(
				shca->ipz_hca_handle, eq->ist);
			query_cnt++;
			iosync();
		} while (int_state && query_cnt < max_query_cnt);
		if (unlikely((query_cnt == max_query_cnt)))
			ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
				 int_state, query_cnt);
	}

	/* read out all eqes */
	eqe_cnt = 0;
	do {
		u32 token;
		eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq);
		if (!eqe_cache[eqe_cnt].eqe)
			break;
		eqe_value = eqe_cache[eqe_cnt].eqe->entry;
		if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
			token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
			read_lock(&ehca_cq_idr_lock);
			eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
			if (eqe_cache[eqe_cnt].cq)
				atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
			read_unlock(&ehca_cq_idr_lock);
			if (!eqe_cache[eqe_cnt].cq) {
				ehca_err(&shca->ib_device,
					 "Invalid eqe for non-existing cq "
					 "token=%x", token);
				continue;
			}
		} else
			eqe_cache[eqe_cnt].cq = NULL;
		eqe_cnt++;
	} while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
	if (!eqe_cnt) {
		if (is_irq)
			ehca_dbg(&shca->ib_device,
				 "No eqe found for irq event");
		goto unlock_irq_spinlock;
	} else if (!is_irq) {
		ret = hipz_h_eoi(eq->ist);
		if (ret != H_SUCCESS)
			ehca_err(&shca->ib_device,
				 "bad return code EOI -rc = %lld\n", ret);
		ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
	}
	if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
		ehca_dbg(&shca->ib_device, "too many eqes for one irq event");

	/* clear the EQ-pending bit of every CQ that was found */
	for (i = 0; i < eqe_cnt; i++) {
		if (eq->eqe_cache[i].cq)
			reset_eq_pending(eq->eqe_cache[i].cq);
	}

	/* check whether more eqes arrived in the meantime */
	spin_lock(&eq->spinlock);
	eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
	spin_unlock(&eq->spinlock);

	/* dispatch the cached eqes */
	for (i = 0; i < eqe_cnt; i++)
		if (eq->eqe_cache[i].cq) {
			if (ehca_scaling_code)
				queue_comp_task(eq->eqe_cache[i].cq);
			else {
				struct ehca_cq *cq = eq->eqe_cache[i].cq;
				comp_event_callback(cq);
				if (atomic_dec_and_test(&cq->nr_events))
					wake_up(&cq->wait_completion);
			}
		} else {
			ehca_dbg(&shca->ib_device, "Got non completion event");
			parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
		}

	/* poll the eq if it was not empty above */
	if (eq_empty)
		goto unlock_irq_spinlock;
	do {
		struct ehca_eqe *eqe;
		eqe = ehca_poll_eq(shca, &shca->eq);
		if (!eqe)
			break;
		process_eqe(shca, eqe);
	} while (1);

unlock_irq_spinlock:
	spin_unlock_irqrestore(&eq->irq_spinlock, flags);
}

void ehca_tasklet_eq(unsigned long data)
{
	ehca_process_eq((struct ehca_shca *)data, 1);
}

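/*
 * Completion-task pool ("scaling code"): when ehca_scaling_code is set,
 * completion callbacks are not run from the tasklet but handed to per-CPU
 * kernel threads. find_next_online_cpu() picks the next online CPU in a
 * simple round-robin fashion.
 */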
static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
{
	int cpu;
	unsigned long flags;

	WARN_ON_ONCE(!in_interrupt());
	if (ehca_debug_level >= 3)
		ehca_dmp(cpu_online_mask, cpumask_size(), "");

	spin_lock_irqsave(&pool->last_cpu_lock, flags);
	cpu = cpumask_next(pool->last_cpu, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_online_mask);
	pool->last_cpu = cpu;
	spin_unlock_irqrestore(&pool->last_cpu_lock, flags);

	return cpu;
}

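/*
 * Queue a CQ on a specific CPU's work list. A CQ that is already queued only
 * gets its callback count bumped; otherwise it is appended to the list and
 * the CPU's completion thread is woken.
 */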
static void __queue_comp_task(struct ehca_cq *__cq,
			      struct ehca_cpu_comp_task *cct)
{
	unsigned long flags;

	spin_lock_irqsave(&cct->task_lock, flags);
	spin_lock(&__cq->task_lock);

	if (__cq->nr_callbacks == 0) {
		__cq->nr_callbacks++;
		list_add_tail(&__cq->entry, &cct->cq_list);
		cct->cq_jobs++;
		wake_up(&cct->wait_queue);
	} else
		__cq->nr_callbacks++;

	spin_unlock(&__cq->task_lock);
	spin_unlock_irqrestore(&cct->task_lock, flags);
}

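/*
 * Pick a CPU for the completion work. If the first choice already has jobs
 * pending, one more round-robin step is taken before the CQ is queued.
 */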
static void queue_comp_task(struct ehca_cq *__cq)
{
	int cpu_id;
	struct ehca_cpu_comp_task *cct;
	int cq_jobs;
	unsigned long flags;

	cpu_id = find_next_online_cpu(pool);
	BUG_ON(!cpu_online(cpu_id));

	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
	BUG_ON(!cct);

	spin_lock_irqsave(&cct->task_lock, flags);
	cq_jobs = cct->cq_jobs;
	spin_unlock_irqrestore(&cct->task_lock, flags);
	if (cq_jobs > 0) {
		cpu_id = find_next_online_cpu(pool);
		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
		BUG_ON(!cct);
	}

	__queue_comp_task(__cq, cct);
}

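/*
 * Work off one CPU's CQ list. The task lock is dropped around the actual
 * completion callback; a CQ is removed from the list only when its callback
 * count drops to zero.
 */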
static void run_comp_task(struct ehca_cpu_comp_task *cct)
{
	struct ehca_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&cct->task_lock, flags);

	while (!list_empty(&cct->cq_list)) {
		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
		spin_unlock_irqrestore(&cct->task_lock, flags);

		comp_event_callback(cq);
		if (atomic_dec_and_test(&cq->nr_events))
			wake_up(&cq->wait_completion);

		spin_lock_irqsave(&cct->task_lock, flags);
		spin_lock(&cq->task_lock);
		cq->nr_callbacks--;
		if (!cq->nr_callbacks) {
			list_del_init(cct->cq_list.next);
			cct->cq_jobs--;
		}
		spin_unlock(&cq->task_lock);
	}

	spin_unlock_irqrestore(&cct->task_lock, flags);
}

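/*
 * Per-CPU completion kthread: sleep while the CQ list is empty, otherwise
 * process it via run_comp_task().
 */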
static int comp_task(void *__cct)
{
	struct ehca_cpu_comp_task *cct = __cct;
	int cql_empty;
	DECLARE_WAITQUEUE(wait, current);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		add_wait_queue(&cct->wait_queue, &wait);

		spin_lock_irq(&cct->task_lock);
		cql_empty = list_empty(&cct->cq_list);
		spin_unlock_irq(&cct->task_lock);
		if (cql_empty)
			schedule();
		else
			__set_current_state(TASK_RUNNING);

		remove_wait_queue(&cct->wait_queue, &wait);

		spin_lock_irq(&cct->task_lock);
		cql_empty = list_empty(&cct->cq_list);
		spin_unlock_irq(&cct->task_lock);
		if (!cql_empty)
			run_comp_task(__cct);

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

static struct task_struct *create_comp_task(struct ehca_comp_pool *pool,
					    int cpu)
{
	struct ehca_cpu_comp_task *cct;

	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
	spin_lock_init(&cct->task_lock);
	INIT_LIST_HEAD(&cct->cq_list);
	init_waitqueue_head(&cct->wait_queue);
	cct->task = kthread_create(comp_task, cct, "ehca_comp/%d", cpu);

	return cct->task;
}

static void destroy_comp_task(struct ehca_comp_pool *pool,
			      int cpu)
{
	struct ehca_cpu_comp_task *cct;
	struct task_struct *task;
	unsigned long flags_cct;

	cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);

	spin_lock_irqsave(&cct->task_lock, flags_cct);

	task = cct->task;
	cct->task = NULL;
	cct->cq_jobs = 0;

	spin_unlock_irqrestore(&cct->task_lock, flags_cct);

	if (task)
		kthread_stop(task);
}

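/*
 * Move the queued CQ work of a dead CPU over to the CPU currently running
 * the hotplug notifier.
 */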
static void __cpuinit take_over_work(struct ehca_comp_pool *pool, int cpu)
{
	struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
	LIST_HEAD(list);
	struct ehca_cq *cq;
	unsigned long flags_cct;

	spin_lock_irqsave(&cct->task_lock, flags_cct);

	list_splice_init(&cct->cq_list, &list);

	while (!list_empty(&list)) {
		/* take entries from the spliced-off list, not from cq_list,
		 * which list_splice_init() has just reinitialized */
		cq = list_entry(list.next, struct ehca_cq, entry);

		list_del(&cq->entry);
		__queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
						  smp_processor_id()));
	}

	spin_unlock_irqrestore(&cct->task_lock, flags_cct);
}

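/*
 * CPU hotplug notifier: create, bind, wake or destroy the per-CPU completion
 * thread as CPUs come and go, and take over the work of a dead CPU.
 */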
static int __cpuinit comp_pool_callback(struct notifier_block *nfb,
					unsigned long action,
					void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct ehca_cpu_comp_task *cct;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
		if (!create_comp_task(pool, cpu)) {
			ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
		kthread_bind(cct->task, cpumask_any(cpu_online_mask));
		destroy_comp_task(pool, cpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu);
		cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
		kthread_bind(cct->task, cpu);
		wake_up_process(cct->task);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu);
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu);
		destroy_comp_task(pool, cpu);
		take_over_work(pool, cpu);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block comp_pool_callback_nb __cpuinitdata = {
	.notifier_call	= comp_pool_callback,
	.priority	= 0,
};

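/*
 * Set up the completion-task pool: one kthread per online CPU, bound to its
 * CPU, plus the hotplug notifier. A no-op unless the scaling code is enabled.
 */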
int ehca_create_comp_pool(void)
{
	int cpu;
	struct task_struct *task;

	if (!ehca_scaling_code)
		return 0;

	pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
	if (pool == NULL)
		return -ENOMEM;

	spin_lock_init(&pool->last_cpu_lock);
	pool->last_cpu = cpumask_any(cpu_online_mask);

	pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
	if (pool->cpu_comp_tasks == NULL) {
		kfree(pool);
		return -EINVAL;
	}

	for_each_online_cpu(cpu) {
		task = create_comp_task(pool, cpu);
		if (task) {
			kthread_bind(task, cpu);
			wake_up_process(task);
		}
	}

	register_hotcpu_notifier(&comp_pool_callback_nb);

	printk(KERN_INFO "eHCA scaling code enabled\n");

	return 0;
}

void ehca_destroy_comp_pool(void)
{
	int i;

	if (!ehca_scaling_code)
		return;

	unregister_hotcpu_notifier(&comp_pool_callback_nb);

	for_each_online_cpu(i)
		destroy_comp_task(pool, i);

	free_percpu(pool->cpu_comp_tasks);
	kfree(pool);
}