/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");

static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_fc_on_t5,
		 "Allow DB Flow Control on T5 (default = 0)");

static int allow_db_coalescing_on_t5;
module_param(allow_db_coalescing_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_coalescing_on_t5,
		 "Allow DB Coalescing on T5 (default = 0)");

int c4iw_wr_log;
module_param(c4iw_wr_log, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data.");

static int c4iw_wr_log_size_order = 12;
module_param(c4iw_wr_log_size_order, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log_size_order,
		 "Number of entries (log2) in the work request timing log.");

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
static struct workqueue_struct *reg_workq;

#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
#define DB_FC_DRAIN_THRESH 0

static struct dentry *c4iw_debugfs_root;

struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};

static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}

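/*
 * Capture one work-request timing sample into the wr_log ring.  Called
 * from the CQ poll path when c4iw_wr_log is enabled; it records the SGE
 * and host timestamps taken at post time and at poll time.
 */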
void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
{
	struct wr_log_entry le;
	int idx;

	if (!wq->rdev->wr_log)
		return;

	idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
		(wq->rdev->wr_log_size - 1);
	le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
	le.poll_host_time = ktime_get();
	le.valid = 1;
	le.cqe_sge_ts = CQE_TS(cqe);
	if (SQ_TYPE(cqe)) {
		le.qid = wq->sq.qid;
		le.opcode = CQE_OPCODE(cqe);
		le.post_host_time = wq->sq.sw_sq[wq->sq.cidx].host_time;
		le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
		le.wr_id = CQE_WRID_SQ_IDX(cqe);
	} else {
		le.qid = wq->rq.qid;
		le.opcode = FW_RI_RECEIVE;
		le.post_host_time = wq->rq.sw_rq[wq->rq.cidx].host_time;
		le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
		le.wr_id = CQE_WRID_MSN(cqe);
	}
	wq->rdev->wr_log[idx] = le;
}

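/*
 * Dump the wr_log ring to a seq_file, starting at the oldest slot and
 * printing, for each valid entry, the deltas between post/poll host
 * times and the corresponding SGE timestamps (converted to nsecs).
 */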
static int wr_log_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;
	ktime_t prev_time;
	struct wr_log_entry *lep;
	int prev_time_set = 0;
	int idx, end;

#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)

	idx = atomic_read(&dev->rdev.wr_log_idx) &
		(dev->rdev.wr_log_size - 1);
	end = idx - 1;
	if (end < 0)
		end = dev->rdev.wr_log_size - 1;
	lep = &dev->rdev.wr_log[idx];
	while (idx != end) {
		if (lep->valid) {
			if (!prev_time_set) {
				prev_time_set = 1;
				prev_time = lep->poll_host_time;
			}
			seq_printf(seq, "%04u: nsec %llu qid %u opcode "
				   "%u %s 0x%x host_wr_delta nsec %llu "
				   "post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
				   "poll_sge_ts 0x%llx post_poll_delta_ns %llu "
				   "cqe_poll_delta_ns %llu\n",
				   idx,
				   ktime_to_ns(ktime_sub(lep->poll_host_time,
							 prev_time)),
				   lep->qid, lep->opcode,
				   lep->opcode == FW_RI_RECEIVE ?
						  "msn" : "wrid",
				   lep->wr_id,
				   ktime_to_ns(ktime_sub(lep->poll_host_time,
							 lep->post_host_time)),
				   lep->post_sge_ts, lep->cqe_sge_ts,
				   lep->poll_sge_ts,
				   ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
				   ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
			prev_time = lep->poll_host_time;
		}
		idx++;
		if (idx > (dev->rdev.wr_log_size - 1))
			idx = 0;
		lep = &dev->rdev.wr_log[idx];
	}
#undef ts2ns
	return 0;
}

static int wr_log_open(struct inode *inode, struct file *file)
{
	return single_open(file, wr_log_show, inode->i_private);
}

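/* Any write to the wr_log debugfs file invalidates all log entries. */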
static ssize_t wr_log_clear(struct file *file, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;
	int i;

	if (dev->rdev.wr_log)
		for (i = 0; i < dev->rdev.wr_log_size; i++)
			dev->rdev.wr_log[i].valid = 0;
	return count;
}

static const struct file_operations wr_log_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = wr_log_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = wr_log_clear,
};

static struct sockaddr_in zero_sin = {
	.sin_family = AF_INET,
};

static struct sockaddr_in6 zero_sin6 = {
	.sin6_family = AF_INET6,
};

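/*
 * Return pointers to the cm_id's addresses (or the zero address when no
 * cm_id is attached) along with the possibly port-mapped addresses kept
 * in the endpoint itself.
 */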
static void set_ep_sin_addrs(struct c4iw_ep *ep,
			     struct sockaddr_in **lsin,
			     struct sockaddr_in **rsin,
			     struct sockaddr_in **m_lsin,
			     struct sockaddr_in **m_rsin)
{
	struct iw_cm_id *id = ep->com.cm_id;

	*m_lsin = (struct sockaddr_in *)&ep->com.local_addr;
	*m_rsin = (struct sockaddr_in *)&ep->com.remote_addr;
	if (id) {
		*lsin = (struct sockaddr_in *)&id->local_addr;
		*rsin = (struct sockaddr_in *)&id->remote_addr;
	} else {
		*lsin = &zero_sin;
		*rsin = &zero_sin;
	}
}

static void set_ep_sin6_addrs(struct c4iw_ep *ep,
			      struct sockaddr_in6 **lsin6,
			      struct sockaddr_in6 **rsin6,
			      struct sockaddr_in6 **m_lsin6,
			      struct sockaddr_in6 **m_rsin6)
{
	struct iw_cm_id *id = ep->com.cm_id;

	*m_lsin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
	*m_rsin6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
	if (id) {
		*lsin6 = (struct sockaddr_in6 *)&id->local_addr;
		*rsin6 = (struct sockaddr_in6 *)&id->remote_addr;
	} else {
		*lsin6 = &zero_sin6;
		*rsin6 = &zero_sin6;
	}
}

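/*
 * Format one QP into the "qps" debugfs buffer.  Returns nonzero once
 * the buffer is full so the caller can stop iterating.
 */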
static int dump_qp(struct c4iw_qp *qp, struct c4iw_debugfs_data *qpd)
{
	int space;
	int cc;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep) {
		struct c4iw_ep *ep = qp->ep;

		if (ep->com.local_addr.ss_family == AF_INET) {
			struct sockaddr_in *lsin;
			struct sockaddr_in *rsin;
			struct sockaddr_in *m_lsin;
			struct sockaddr_in *m_rsin;

			set_ep_sin_addrs(ep, &lsin, &rsin, &m_lsin, &m_rsin);
			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u %s id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI4:%u/%u->%pI4:%u/%u\n",
				      qp->wq.sq.qid, qp->srq ? "srq" : "rq",
				      qp->srq ? qp->srq->idx : qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      ep->hwtid, (int)ep->com.state,
				      &lsin->sin_addr, ntohs(lsin->sin_port),
				      ntohs(m_lsin->sin_port),
				      &rsin->sin_addr, ntohs(rsin->sin_port),
				      ntohs(m_rsin->sin_port));
		} else {
			struct sockaddr_in6 *lsin6;
			struct sockaddr_in6 *rsin6;
			struct sockaddr_in6 *m_lsin6;
			struct sockaddr_in6 *m_rsin6;

			set_ep_sin6_addrs(ep, &lsin6, &rsin6, &m_lsin6,
					  &m_rsin6);
			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI6:%u/%u->%pI6:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      ep->hwtid, (int)ep->com.state,
				      &lsin6->sin6_addr,
				      ntohs(lsin6->sin6_port),
				      ntohs(m_lsin6->sin6_port),
				      &rsin6->sin6_addr,
				      ntohs(rsin6->sin6_port),
				      ntohs(m_rsin6->sin6_port));
		}
	} else
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}

static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;

	if (!qpd) {
		pr_info("%s null qpd?\n", __func__);
		return 0;
	}
	vfree(qpd->buf);
	kfree(qpd);
	return 0;
}

static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_qp *qp;
	struct c4iw_debugfs_data *qpd;
	unsigned long index;
	int count = 1;

	qpd = kmalloc(sizeof(*qpd), GFP_KERNEL);
	if (!qpd)
		return -ENOMEM;

	qpd->devp = inode->i_private;
	qpd->pos = 0;

	/*
	 * No need to lock; we drop the lock to call vmalloc so it's racy
	 * anyway.  Someone who cares should switch this over to seq_file.
	 */
	xa_for_each(&qpd->devp->qps, index, qp)
		count++;

	qpd->bufsize = count * 180;
	qpd->buf = vmalloc(qpd->bufsize);
	if (!qpd->buf) {
		kfree(qpd);
		return -ENOMEM;
	}

	xa_lock_irq(&qpd->devp->qps);
	xa_for_each(&qpd->devp->qps, index, qp)
		dump_qp(qp, qpd);
	xa_unlock_irq(&qpd->devp->qps);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	return 0;
}

static const struct file_operations qp_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = qp_open,
	.release = qp_release,
	.read = debugfs_read,
	.llseek = default_llseek,
};

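/*
 * Read one stag's TPT entry from adapter memory via cxgb4_read_tpte()
 * and format it into the "stags" debugfs buffer.
 */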
static int dump_stag(unsigned long id, struct c4iw_debugfs_data *stagd)
{
	int space;
	int cc;
	struct fw_ri_tpte tpte;
	int ret;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	ret = cxgb4_read_tpte(stagd->devp->rdev.lldi.ports[0], (u32)id << 8,
			      (__be32 *)&tpte);
	if (ret) {
		dev_err(&stagd->devp->rdev.lldi.pdev->dev,
			"%s cxgb4_read_tpte err %d\n", __func__, ret);
		return ret;
	}
	cc = snprintf(stagd->buf + stagd->pos, space,
		      "stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
		      "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
		      (u32)id << 8,
		      FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
		      FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
		      ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
		      ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
	if (cc < space)
		stagd->pos += cc;
	return 0;
}

static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;

	if (!stagd) {
		pr_info("%s null stagd?\n", __func__);
		return 0;
	}
	vfree(stagd->buf);
	kfree(stagd);
	return 0;
}

static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	void *p;
	unsigned long index;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof(*stagd), GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	xa_for_each(&stagd->devp->mrs, index, p)
		count++;

	stagd->bufsize = count * 256;
	stagd->buf = vmalloc(stagd->bufsize);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	xa_lock_irq(&stagd->devp->mrs);
	xa_for_each(&stagd->devp->mrs, index, p)
		dump_stag(index, stagd);
	xa_unlock_irq(&stagd->devp->mrs);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}

static const struct file_operations stag_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = stag_open,
	.release = stag_release,
	.read = debugfs_read,
	.llseek = default_llseek,
};

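/* Human-readable names for dev->db_state. */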
static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"};

static int stats_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;

	seq_printf(seq, " Object: %10s %10s %10s %10s\n", "Total", "Current",
		   "Max", "Fail");
	seq_printf(seq, " PDID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
		   dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
	seq_printf(seq, " QID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
		   dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
	seq_printf(seq, " SRQS: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.srqt.total, dev->rdev.stats.srqt.cur,
		   dev->rdev.stats.srqt.max, dev->rdev.stats.srqt.fail);
	seq_printf(seq, " TPTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
		   dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
	seq_printf(seq, " PBLMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
		   dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
	seq_printf(seq, " RQTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
		   dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
	seq_printf(seq, " OCQPMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
		   dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
	seq_printf(seq, " DB FULL: %10llu\n", dev->rdev.stats.db_full);
	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
	seq_printf(seq, " DB DROP: %10llu\n", dev->rdev.stats.db_drop);
	seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions,
		   dev->rdev.stats.db_fc_interruptions);
	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.act_ofld_conn_fails);
	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.pas_ofld_conn_fails);
	seq_printf(seq, "NEG_ADV_RCVD: %10llu\n", dev->rdev.stats.neg_adv);
	seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
	return 0;
}

static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}

static ssize_t stats_clear(struct file *file, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.pd.max = 0;
	dev->rdev.stats.pd.fail = 0;
	dev->rdev.stats.qid.max = 0;
	dev->rdev.stats.qid.fail = 0;
	dev->rdev.stats.stag.max = 0;
	dev->rdev.stats.stag.fail = 0;
	dev->rdev.stats.pbl.max = 0;
	dev->rdev.stats.pbl.fail = 0;
	dev->rdev.stats.rqt.max = 0;
	dev->rdev.stats.rqt.fail = 0;
	dev->rdev.stats.srqt.max = 0;
	dev->rdev.stats.srqt.fail = 0;
	dev->rdev.stats.ocqp.max = 0;
	dev->rdev.stats.ocqp.fail = 0;
	dev->rdev.stats.db_full = 0;
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	dev->rdev.stats.tcam_full = 0;
	dev->rdev.stats.act_ofld_conn_fails = 0;
	dev->rdev.stats.pas_ofld_conn_fails = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}

static const struct file_operations stats_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = stats_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = stats_clear,
};

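/*
 * Format one connected endpoint into the "eps" debugfs buffer,
 * including both the cm_id addresses and the port-mapped addresses.
 */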
static int dump_ep(struct c4iw_ep *ep, struct c4iw_debugfs_data *epd)
{
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin;
		struct sockaddr_in *rsin;
		struct sockaddr_in *m_lsin;
		struct sockaddr_in *m_rsin;

		set_ep_sin_addrs(ep, &lsin, &rsin, &m_lsin, &m_rsin);
		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "conn_na %u abort_na %u "
			      "%pI4:%d/%d <-> %pI4:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      ep->stats.connect_neg_adv,
			      ep->stats.abort_neg_adv,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(m_lsin->sin_port),
			      &rsin->sin_addr, ntohs(rsin->sin_port),
			      ntohs(m_rsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6;
		struct sockaddr_in6 *rsin6;
		struct sockaddr_in6 *m_lsin6;
		struct sockaddr_in6 *m_rsin6;

		set_ep_sin6_addrs(ep, &lsin6, &rsin6, &m_lsin6, &m_rsin6);
		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "conn_na %u abort_na %u "
			      "%pI6:%d/%d <-> %pI6:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      ep->stats.connect_neg_adv,
			      ep->stats.abort_neg_adv,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(m_lsin6->sin6_port),
			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
			      ntohs(m_rsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

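/* Format one listening endpoint into the "eps" debugfs buffer. */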
static
int dump_listen_ep(struct c4iw_listen_ep *ep, struct c4iw_debugfs_data *epd)
{
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.cm_id->local_addr;
		struct sockaddr_in *m_lsin = (struct sockaddr_in *)
			&ep->com.cm_id->m_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI4:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(m_lsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.cm_id->local_addr;
		struct sockaddr_in6 *m_lsin6 = (struct sockaddr_in6 *)
			&ep->com.cm_id->m_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI6:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(m_lsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int ep_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd = file->private_data;

	if (!epd) {
		pr_info("%s null epd?\n", __func__);
		return 0;
	}
	vfree(epd->buf);
	kfree(epd);
	return 0;
}

static int ep_open(struct inode *inode, struct file *file)
{
	struct c4iw_ep *ep;
	struct c4iw_listen_ep *lep;
	unsigned long index;
	struct c4iw_debugfs_data *epd;
	int ret = 0;
	int count = 1;

	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
	if (!epd) {
		ret = -ENOMEM;
		goto out;
	}
	epd->devp = inode->i_private;
	epd->pos = 0;

	xa_for_each(&epd->devp->hwtids, index, ep)
		count++;
	xa_for_each(&epd->devp->atids, index, ep)
		count++;
	xa_for_each(&epd->devp->stids, index, lep)
		count++;

	epd->bufsize = count * 240;
	epd->buf = vmalloc(epd->bufsize);
	if (!epd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	xa_lock_irq(&epd->devp->hwtids);
	xa_for_each(&epd->devp->hwtids, index, ep)
		dump_ep(ep, epd);
	xa_unlock_irq(&epd->devp->hwtids);
	xa_lock_irq(&epd->devp->atids);
	xa_for_each(&epd->devp->atids, index, ep)
		dump_ep(ep, epd);
	xa_unlock_irq(&epd->devp->atids);
	xa_lock_irq(&epd->devp->stids);
	xa_for_each(&epd->devp->stids, index, lep)
		dump_listen_ep(lep, epd);
	xa_unlock_irq(&epd->devp->stids);

	file->private_data = epd;
	goto out;
err1:
	kfree(epd);
out:
	return ret;
}

static const struct file_operations ep_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = ep_open,
	.release = ep_release,
	.read = debugfs_read,
};

static void setup_debugfs(struct c4iw_dev *devp)
{
	debugfs_create_file_size("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops, 4096);

	debugfs_create_file_size("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops, 4096);

	debugfs_create_file_size("stats", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stats_debugfs_fops, 4096);

	debugfs_create_file_size("eps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &ep_debugfs_fops, 4096);

	if (c4iw_wr_log)
		debugfs_create_file_size("wr_log", S_IWUSR, devp->debugfs_root,
					 (void *)devp, &wr_log_debugfs_fops,
					 4096);
}

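/*
 * Return a user context's qid lists to the rdev.  A qpid that starts an
 * allocation block (qid & qpmask == 0) goes back to the qid table and
 * the usage stats are adjusted; all list entries are then freed.
 */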
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}

/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;
	unsigned int factor;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * This implementation assumes udb_density == ucq_density!  Eventually
	 * we might need to support this but for now fail the open.  Also the
	 * cqid and qpid range must match for now.
	 */
	if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
		pr_err("%s: unsupported udb/ucq densities %u/%u\n",
		       pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
		       rdev->lldi.ucq_density);
		return -EINVAL;
	}
	if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
	    rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
		pr_err("%s: unsupported qp and cq id ranges qp start %u size %u cq start %u size %u\n",
		       pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
		       rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start,
		       rdev->lldi.vr->cq.size);
		return -EINVAL;
	}

	/* This implementation requires a sge_host_page_size <= PAGE_SIZE. */
	if (rdev->lldi.sge_host_page_size > PAGE_SIZE) {
		pr_err("%s: unsupported sge host page size %u\n",
		       pci_name(rdev->lldi.pdev),
		       rdev->lldi.sge_host_page_size);
		return -EINVAL;
	}

	factor = PAGE_SIZE / rdev->lldi.sge_host_page_size;
	rdev->qpmask = (rdev->lldi.udb_density * factor) - 1;
	rdev->cqmask = (rdev->lldi.ucq_density * factor) - 1;

	pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n",
		 pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
		 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
		 rdev->lldi.vr->pbl.start,
		 rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
		 rdev->lldi.vr->rq.size,
		 rdev->lldi.vr->qp.start,
		 rdev->lldi.vr->qp.size,
		 rdev->lldi.vr->cq.start,
		 rdev->lldi.vr->cq.size,
		 rdev->lldi.vr->srq.size);
	pr_debug("udb %pR db_reg %p gts_reg %p qpmask 0x%x cqmask 0x%x\n",
		 &rdev->lldi.pdev->resource[2],
		 rdev->lldi.db_reg, rdev->lldi.gts_reg,
		 rdev->qpmask, rdev->cqmask);

	if (c4iw_num_stags(rdev) == 0)
		return -EINVAL;

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
	rdev->stats.srqt.total = rdev->lldi.vr->srq.size;
	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
	rdev->stats.qid.total = rdev->lldi.vr->qp.size;

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev),
				 T4_MAX_NUM_PD, rdev->lldi.vr->srq.size);
	if (err) {
		pr_err("error %d initializing resources\n", err);
		return err;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		pr_err("error %d initializing pbl pool\n", err);
		goto destroy_resource;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		pr_err("error %d initializing rqt pool\n", err);
		goto destroy_pblpool;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		pr_err("error %d initializing ocqp pool\n", err);
		goto destroy_rqtpool;
	}
	rdev->status_page = (struct t4_dev_status_page *)
			    __get_free_page(GFP_KERNEL);
	if (!rdev->status_page) {
		err = -ENOMEM;
		goto destroy_ocqp_pool;
	}
	rdev->status_page->qp_start = rdev->lldi.vr->qp.start;
	rdev->status_page->qp_size = rdev->lldi.vr->qp.size;
	rdev->status_page->cq_start = rdev->lldi.vr->cq.start;
	rdev->status_page->cq_size = rdev->lldi.vr->cq.size;
	rdev->status_page->write_cmpl_supported = rdev->lldi.write_cmpl_support;

	if (c4iw_wr_log) {
		rdev->wr_log = kcalloc(1 << c4iw_wr_log_size_order,
				       sizeof(*rdev->wr_log),
				       GFP_KERNEL);
		if (rdev->wr_log) {
			rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
			atomic_set(&rdev->wr_log_idx, 0);
		}
	}

	rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
	if (!rdev->free_workq) {
		err = -ENOMEM;
		goto err_free_status_page_and_wr_log;
	}

	rdev->status_page->db_off = 0;

	init_completion(&rdev->rqt_compl);
	init_completion(&rdev->pbl_compl);
	kref_init(&rdev->rqt_kref);
	kref_init(&rdev->pbl_kref);

	return 0;
err_free_status_page_and_wr_log:
	kfree(rdev->wr_log);
	free_page((unsigned long)rdev->status_page);
destroy_ocqp_pool:
	c4iw_ocqp_pool_destroy(rdev);
destroy_rqtpool:
	c4iw_rqtpool_destroy(rdev);
destroy_pblpool:
	c4iw_pblpool_destroy(rdev);
destroy_resource:
	c4iw_destroy_resource(&rdev->resource);
	return err;
}

static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	kfree(rdev->wr_log);
	c4iw_release_dev_ucontext(rdev, &rdev->uctx);
	free_page((unsigned long)rdev->status_page);
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	wait_for_completion(&rdev->pbl_compl);
	wait_for_completion(&rdev->rqt_compl);
	c4iw_ocqp_pool_destroy(rdev);
	destroy_workqueue(rdev->free_workq);
	c4iw_destroy_resource(&rdev->resource);
}

void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	WARN_ON(!xa_empty(&ctx->dev->cqs));
	WARN_ON(!xa_empty(&ctx->dev->qps));
	WARN_ON(!xa_empty(&ctx->dev->mrs));
	wait_event(ctx->dev->wait, xa_empty(&ctx->dev->hwtids));
	WARN_ON(!xa_empty(&ctx->dev->stids));
	WARN_ON(!xa_empty(&ctx->dev->atids));
	if (ctx->dev->rdev.bar2_kva)
		iounmap(ctx->dev->rdev.bar2_kva);
	if (ctx->dev->rdev.oc_mw_kva)
		iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	ctx->dev = NULL;
}

static void c4iw_remove(struct uld_ctx *ctx)
{
	pr_debug("c4iw_dev %p\n", ctx->dev);
	c4iw_unregister_device(ctx->dev);
	c4iw_dealloc(ctx);
}

static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0;
}

static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	if (!rdma_supported(infop)) {
		pr_info("%s: RDMA not supported on this device\n",
			pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}
	if (!ocqp_supported(infop))
		pr_info("%s: On-Chip Queues not supported on this device\n",
			pci_name(infop->pdev));

	devp = ib_alloc_device(c4iw_dev, ibdev);
	if (!devp) {
		pr_err("Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

	/* init various hw-queue params based on lld info */
	pr_debug("Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
		 devp->rdev.lldi.sge_ingpadboundary,
		 devp->rdev.lldi.sge_egrstatuspagesize);

	devp->rdev.hw_queue.t4_eq_status_entries =
		devp->rdev.lldi.sge_egrstatuspagesize / 64;
	devp->rdev.hw_queue.t4_max_eq_size = 65520;
	devp->rdev.hw_queue.t4_max_iq_size = 65520;
	devp->rdev.hw_queue.t4_max_rq_size = 8192 -
		devp->rdev.hw_queue.t4_eq_status_entries - 1;
	devp->rdev.hw_queue.t4_max_sq_size =
		devp->rdev.hw_queue.t4_max_eq_size -
		devp->rdev.hw_queue.t4_eq_status_entries - 1;
	devp->rdev.hw_queue.t4_max_qp_depth =
		devp->rdev.hw_queue.t4_max_rq_size;
	devp->rdev.hw_queue.t4_max_cq_depth =
		devp->rdev.hw_queue.t4_max_iq_size - 2;
	devp->rdev.hw_queue.t4_stat_len =
		devp->rdev.lldi.sge_egrstatuspagesize;

	/*
	 * For T5/T6 devices, we map all of BAR2 with WC.
	 * For T4 devices with onchip qp mem, we map only that part
	 * of BAR2 with WC.
	 */
	devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2);
	if (!is_t4(devp->rdev.lldi.adapter_type)) {
		devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
			pci_resource_len(devp->rdev.lldi.pdev, 2));
		if (!devp->rdev.bar2_kva) {
			pr_err("Unable to ioremap BAR2\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	} else if (ocqp_supported(infop)) {
		devp->rdev.oc_mw_pa =
			pci_resource_start(devp->rdev.lldi.pdev, 2) +
			pci_resource_len(devp->rdev.lldi.pdev, 2) -
			roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size);
		devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
			devp->rdev.lldi.vr->ocq.size);
		if (!devp->rdev.oc_mw_kva) {
			pr_err("Unable to ioremap onchip mem\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	}

	pr_debug("ocq memory: hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
		 devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
		 devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		pr_err("Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	xa_init_flags(&devp->cqs, XA_FLAGS_LOCK_IRQ);
	xa_init_flags(&devp->qps, XA_FLAGS_LOCK_IRQ);
	xa_init_flags(&devp->mrs, XA_FLAGS_LOCK_IRQ);
	xa_init_flags(&devp->hwtids, XA_FLAGS_LOCK_IRQ);
	xa_init_flags(&devp->atids, XA_FLAGS_LOCK_IRQ);
	xa_init_flags(&devp->stids, XA_FLAGS_LOCK_IRQ);
	mutex_init(&devp->rdev.stats.lock);
	mutex_init(&devp->db_mutex);
	INIT_LIST_HEAD(&devp->db_fc_list);
	init_waitqueue_head(&devp->wait);
	devp->avail_ird = devp->rdev.lldi.max_ird_adapter;

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}

	return devp;
}

static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
			DRV_VERSION);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	pr_debug("found device %s nchan %u nrxq %u ntxq %u nports %u\n",
		 pci_name(ctx->lldi.pdev),
		 ctx->lldi.nchan, ctx->lldi.nrxq,
		 ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		pr_debug("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}

static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
						 const __be64 *rsp,
						 u32 pktshift)
{
	struct sk_buff *skb;

	/*
	 * Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver.  Once the driver synthesizes the request the skb will go
	 * through the regular cpl_pass_accept_req processing.
	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
	 * cpl_rx_pkt.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
		  sizeof(struct rss_header) - pktshift);

	/*
	 * This skb will contain:
	 *   rss_header from the rspq descriptor (1 flit)
	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
	 *   space for the difference between the size of an
	 *     rx_pkt and pass_accept_req cpl (1 flit)
	 *   the packet data from the gl
	 */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
				sizeof(struct rss_header));
	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
				       sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);
	return skb;
}

static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
			      const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;

	if (opcode != CPL_RX_PKT)
		goto out;

	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
	if (skb == NULL)
		goto out;

	if (c4iw_handlers[opcode] == NULL) {
		pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
		kfree_skb(skb);
		goto out;
	}
	c4iw_handlers[opcode](dev, skb);
	return 1;
out:
	return 0;
}

static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	u8 opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;

		u32 qid = be32_to_cpu(rc->pldbuflen_qid);
		c4iw_ev_handler(dev, qid);
		return 0;
	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
		if (recv_rx_pkt(dev, gl, rsp))
			return 0;

		pr_info("%s: unexpected FL contents at %p, RSS %#llx, FL %#llx, len %u\n",
			pci_name(ctx->lldi.pdev), gl->va,
			be64_to_cpu(*rsp),
			be64_to_cpu(*(__force __be64 *)gl->va),
			gl->tot_len);

		return 0;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	opcode = *(u8 *)rsp;
	if (c4iw_handlers[opcode]) {
		c4iw_handlers[opcode](dev, skb);
	} else {
		pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
		kfree_skb(skb);
	}

	return 0;
nomem:
	return -1;
}

static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	pr_debug("new_state %u\n", new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		pr_info("%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
				pr_err("%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
				break;
			}

			INIT_WORK(&ctx->reg_work, c4iw_register_device);
			queue_work(reg_workq, &ctx->reg_work);
		}
		break;
	case CXGB4_STATE_DOWN:
		pr_info("%s: Down\n", pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_FATAL_ERROR:
	case CXGB4_STATE_START_RECOVERY:
		pr_info("%s: Fatal Error\n", pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event = {};

			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			event.event = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		pr_info("%s: Detach\n", pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}

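/*
 * DB flow control: when the SGE DB FIFO fills, stop all user DB rings.
 * If the status page is enabled, user libraries spin on db_off;
 * otherwise each qp's doorbells are disabled individually.
 */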
static void stop_queues(struct uld_ctx *ctx)
{
	struct c4iw_qp *qp;
	unsigned long index, flags;

	xa_lock_irqsave(&ctx->dev->qps, flags);
	ctx->dev->rdev.stats.db_state_transitions++;
	ctx->dev->db_state = STOPPED;
	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
		xa_for_each(&ctx->dev->qps, index, qp)
			t4_disable_wq_db(&qp->wq);
	} else {
		ctx->dev->rdev.status_page->db_off = 1;
	}
	xa_unlock_irqrestore(&ctx->dev->qps, flags);
}

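/* Re-ring any doorbell writes that accumulated while the qp was stalled. */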
static void resume_rc_qp(struct c4iw_qp *qp)
{
	spin_lock(&qp->lock);
	t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, NULL);
	qp->wq.sq.wq_pidx_inc = 0;
	t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, NULL);
	qp->wq.rq.wq_pidx_inc = 0;
	spin_unlock(&qp->lock);
}

static void resume_a_chunk(struct uld_ctx *ctx)
{
	int i;
	struct c4iw_qp *qp;

	for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
		qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
				      db_fc_entry);
		list_del_init(&qp->db_fc_entry);
		resume_rc_qp(qp);
		if (list_empty(&ctx->dev->db_fc_list))
			break;
	}
}

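/*
 * Leave the STOPPED state via FLOW_CONTROL: resume qps in chunks of
 * DB_FC_RESUME_SIZE, pausing DB_FC_RESUME_DELAY jiffies between chunks
 * while the DB FIFO remains above the drain threshold.
 */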
1305
1306static void resume_queues(struct uld_ctx *ctx)
1307{
1308 xa_lock_irq(&ctx->dev->qps);
1309 if (ctx->dev->db_state != STOPPED)
1310 goto out;
1311 ctx->dev->db_state = FLOW_CONTROL;
1312 while (1) {
1313 if (list_empty(&ctx->dev->db_fc_list)) {
1314 struct c4iw_qp *qp;
1315 unsigned long index;
1316
1317 WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
1318 ctx->dev->db_state = NORMAL;
1319 ctx->dev->rdev.stats.db_state_transitions++;
1320 if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
1321 xa_for_each(&ctx->dev->qps, index, qp)
1322 t4_enable_wq_db(&qp->wq);
1323 } else {
1324 ctx->dev->rdev.status_page->db_off = 0;
1325 }
1326 break;
1327 } else {
1328 if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
1329 < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
1330 DB_FC_DRAIN_THRESH)) {
1331 resume_a_chunk(ctx);
1332 }
1333 if (!list_empty(&ctx->dev->db_fc_list)) {
1334 xa_unlock_irq(&ctx->dev->qps);
1335 if (DB_FC_RESUME_DELAY) {
1336 set_current_state(TASK_UNINTERRUPTIBLE);
1337 schedule_timeout(DB_FC_RESUME_DELAY);
1338 }
1339 xa_lock_irq(&ctx->dev->qps);
1340 if (ctx->dev->db_state != FLOW_CONTROL)
1341 break;
1342 }
1343 }
1344 }
1345out:
1346 if (ctx->dev->db_state != NORMAL)
1347 ctx->dev->rdev.stats.db_fc_interruptions++;
1348 xa_unlock_irq(&ctx->dev->qps);
1349}
1350
struct qp_list {
	unsigned int idx;
	struct c4iw_qp **qps;
};

static void deref_qps(struct qp_list *qp_list)
{
	int idx;

	for (idx = 0; idx < qp_list->idx; idx++)
		c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);
}

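/*
 * For each qp in the list, resync the HW producer index of its SQ and
 * RQ with the SW state, then wait for the DB FIFO to drain.
 */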
static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
{
	int idx;
	int ret;

	for (idx = 0; idx < qp_list->idx; idx++) {
		struct c4iw_qp *qp = qp_list->qps[idx];

		xa_lock_irq(&qp->rhp->qps);
		spin_lock(&qp->lock);
		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.sq.qid,
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (ret) {
			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing SQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
			spin_unlock(&qp->lock);
			xa_unlock_irq(&qp->rhp->qps);
			return;
		}
		qp->wq.sq.wq_pidx_inc = 0;

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.rq.qid,
					  t4_rq_host_wq_pidx(&qp->wq),
					  t4_rq_wq_size(&qp->wq));

		if (ret) {
			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing RQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
			spin_unlock(&qp->lock);
			xa_unlock_irq(&qp->rhp->qps);
			return;
		}
		qp->wq.rq.wq_pidx_inc = 0;
		spin_unlock(&qp->lock);
		xa_unlock_irq(&qp->rhp->qps);

		/* Wait for the dbfifo to drain */
		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(usecs_to_jiffies(10));
		}
	}
}

static void recover_queues(struct uld_ctx *ctx)
{
	struct c4iw_qp *qp;
	unsigned long index;
	int count = 0;
	struct qp_list qp_list;
	int ret;

	/* slow everybody down */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(1000));

	/* flush the SGE contexts */
	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
	if (ret) {
		pr_err("%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		return;
	}

	/* Count active queues so we can build a list of queues to recover */
	xa_lock_irq(&ctx->dev->qps);
	WARN_ON(ctx->dev->db_state != STOPPED);
	ctx->dev->db_state = RECOVERY;
	xa_for_each(&ctx->dev->qps, index, qp)
		count++;

	qp_list.qps = kcalloc(count, sizeof(*qp_list.qps), GFP_ATOMIC);
	if (!qp_list.qps) {
		xa_unlock_irq(&ctx->dev->qps);
		return;
	}
	qp_list.idx = 0;

	/* add and ref each qp so it doesn't get freed */
	xa_for_each(&ctx->dev->qps, index, qp) {
		c4iw_qp_add_ref(&qp->ibqp);
		qp_list.qps[qp_list.idx++] = qp;
	}

	xa_unlock_irq(&ctx->dev->qps);

	/* now traverse the list in a safe context to recover the db state */
	recover_lost_dbs(ctx, &qp_list);

	/* we're almost done!  deref the qps and clean up */
	deref_qps(&qp_list);
	kfree(qp_list.qps);

	xa_lock_irq(&ctx->dev->qps);
	WARN_ON(ctx->dev->db_state != RECOVERY);
	ctx->dev->db_state = STOPPED;
	xa_unlock_irq(&ctx->dev->qps);
}

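/* Dispatch DB FIFO full/empty/drop notifications from the LLD. */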
static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct uld_ctx *ctx = handle;

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(ctx);
		ctx->dev->rdev.stats.db_full++;
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_empty++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_drop++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	default:
		pr_warn("%s: unknown control cmd %u\n",
			pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
}

static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 511,
	.ciq = true,
	.lro = false,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
	.control = c4iw_uld_control,
};

void _c4iw_free_wr_wait(struct kref *kref)
{
	struct c4iw_wr_wait *wr_waitp;

	wr_waitp = container_of(kref, struct c4iw_wr_wait, kref);
	pr_debug("Free wr_wait %p\n", wr_waitp);
	kfree(wr_waitp);
}

struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp)
{
	struct c4iw_wr_wait *wr_waitp;

	wr_waitp = kzalloc(sizeof(*wr_waitp), gfp);
	if (wr_waitp) {
		kref_init(&wr_waitp->kref);
		pr_debug("wr_wait %p\n", wr_waitp);
	}
	return wr_waitp;
}

static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);

	reg_workq = create_singlethread_workqueue("Register_iWARP_device");
	if (!reg_workq) {
		pr_err("Failed creating workqueue to register iwarp device\n");
		return -ENOMEM;
	}

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}

static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	flush_workqueue(reg_workq);
	destroy_workqueue(reg_workq);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);