1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35#include <linux/module.h>
36#include <linux/init.h>
37#include <linux/slab.h>
38#include <linux/err.h>
39#include <linux/ctype.h>
40#include <linux/kthread.h>
41#include <linux/string.h>
42#include <linux/delay.h>
43#include <linux/atomic.h>
44#include <linux/inet.h>
45#include <rdma/ib_cache.h>
46#include <scsi/scsi_proto.h>
47#include <scsi/scsi_tcq.h>
48#include <target/target_core_base.h>
49#include <target/target_core_fabric.h>
50#include "ib_srpt.h"
51
52
/* Driver name; also used as the log-message prefix via pr_fmt() below. */
#define DRV_NAME "ib_srpt"

/* Identification string reported to initiators in the IOC profile. */
#define SRPT_ID_STRING "Linux SRP target"

/* Prefix all pr_*() messages from this file with the driver name. */
#undef pr_fmt
#define pr_fmt(fmt) DRV_NAME " " fmt

MODULE_AUTHOR("Vu Pham and Bart Van Assche");
MODULE_DESCRIPTION("SCSI RDMA Protocol target driver");
MODULE_LICENSE("Dual BSD/GPL");
63
64
65
66
67
/*
 * Global driver state.
 *
 * srpt_service_guid is exposed read-only as a module parameter (see the
 * module_param_call() further down); per its parameter description it is used
 * for ioc_guid, id_ext and cm_listen_id instead of the first HCA's node GUID.
 */
static u64 srpt_service_guid;
static DEFINE_SPINLOCK(srpt_dev_lock);	/* Presumably protects srpt_dev_list - confirm against users. */
static LIST_HEAD(srpt_dev_list);	/* List of srpt_device structures. */

/* Maximum size in bytes of an SRP request IU accepted by this target. */
static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
module_param(srp_max_req_size, int, 0444);
MODULE_PARM_DESC(srp_max_req_size,
		 "Maximum size of SRP request messages in bytes.");

/* Number of receive buffers posted on the shared receive queue (SRQ). */
static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
module_param(srpt_srq_size, int, 0444);
MODULE_PARM_DESC(srpt_srq_size,
		 "Shared receive queue (SRQ) size.");
81
/**
 * srpt_get_u64_x - "get" callback that formats a u64 module parameter in hex
 * @buffer: output buffer supplied by the module parameter core.
 * @kp: parameter descriptor; kp->arg points at the u64 to format.
 *
 * Returns the number of characters written (the sprintf() return value).
 */
static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg);
}
/* No "set" callback and mode 0444: the service GUID is read-only via sysfs. */
module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
		  0444);
MODULE_PARM_DESC(srpt_service_guid,
		 "Using this value for ioc_guid, id_ext, and cm_listen_id instead of using the node_guid of the first HCA.");
90
static struct ib_client srpt_client;

/* NOTE(review): presumably serializes rdma_cm_port/rdma_cm_id updates - confirm against users outside this chunk. */
static DEFINE_MUTEX(rdma_cm_mutex);

/* Port number and listener ID used for RDMA/CM connections. */
static u16 rdma_cm_port;
static struct rdma_cm_id *rdma_cm_id;

/* Forward declarations. */
static void srpt_release_cmd(struct se_cmd *se_cmd);
static void srpt_free_ch(struct kref *kref);
static int srpt_queue_status(struct se_cmd *cmd);
static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_process_wait_list(struct srpt_rdma_ch *ch);
103
104
105
106
107
108static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
109{
110 unsigned long flags;
111 enum rdma_ch_state prev;
112 bool changed = false;
113
114 spin_lock_irqsave(&ch->spinlock, flags);
115 prev = ch->state;
116 if (new > prev) {
117 ch->state = new;
118 changed = true;
119 }
120 spin_unlock_irqrestore(&ch->spinlock, flags);
121
122 return changed;
123}
124
125
126
127
128
129
130
131
132
133
134
135static void srpt_event_handler(struct ib_event_handler *handler,
136 struct ib_event *event)
137{
138 struct srpt_device *sdev =
139 container_of(handler, struct srpt_device, event_handler);
140 struct srpt_port *sport;
141 u8 port_num;
142
143 pr_debug("ASYNC event= %d on device= %s\n", event->event,
144 dev_name(&sdev->device->dev));
145
146 switch (event->event) {
147 case IB_EVENT_PORT_ERR:
148 port_num = event->element.port_num - 1;
149 if (port_num < sdev->device->phys_port_cnt) {
150 sport = &sdev->port[port_num];
151 sport->lid = 0;
152 sport->sm_lid = 0;
153 } else {
154 WARN(true, "event %d: port_num %d out of range 1..%d\n",
155 event->event, port_num + 1,
156 sdev->device->phys_port_cnt);
157 }
158 break;
159 case IB_EVENT_PORT_ACTIVE:
160 case IB_EVENT_LID_CHANGE:
161 case IB_EVENT_PKEY_CHANGE:
162 case IB_EVENT_SM_CHANGE:
163 case IB_EVENT_CLIENT_REREGISTER:
164 case IB_EVENT_GID_CHANGE:
165
166 port_num = event->element.port_num - 1;
167 if (port_num < sdev->device->phys_port_cnt) {
168 sport = &sdev->port[port_num];
169 if (!sport->lid && !sport->sm_lid)
170 schedule_work(&sport->work);
171 } else {
172 WARN(true, "event %d: port_num %d out of range 1..%d\n",
173 event->event, port_num + 1,
174 sdev->device->phys_port_cnt);
175 }
176 break;
177 default:
178 pr_err("received unrecognized IB event %d\n", event->event);
179 break;
180 }
181}
182
183
184
185
186
187
/**
 * srpt_srq_event - SRQ event callback function
 * @event: Description of the event that occurred.
 * @ctx: Context pointer specified at SRQ creation time (unused here).
 *
 * Only logs the event; no recovery action is taken.
 */
static void srpt_srq_event(struct ib_event *event, void *ctx)
{
	pr_debug("SRQ event %d\n", event->event);
}
192
193static const char *get_ch_state_name(enum rdma_ch_state s)
194{
195 switch (s) {
196 case CH_CONNECTING:
197 return "connecting";
198 case CH_LIVE:
199 return "live";
200 case CH_DISCONNECTING:
201 return "disconnecting";
202 case CH_DRAINING:
203 return "draining";
204 case CH_DISCONNECTED:
205 return "disconnected";
206 }
207 return "???";
208}
209
210
211
212
213
214
/**
 * srpt_qp_event - QP event callback function
 * @event: Description of the event that occurred.
 * @ch: SRPT RDMA channel associated with the QP.
 *
 * For COMM_EST the event is forwarded to the CM (RDMA/CM or IB/CM depending
 * on how the channel was established); LAST_WQE_REACHED is only logged.
 */
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
	pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n",
		 event->event, ch, ch->sess_name, ch->qp->qp_num,
		 get_ch_state_name(ch->state));

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		/* Forward to whichever CM established this channel. */
		if (ch->using_rdma_cm)
			rdma_notify(ch->rdma_cm.cm_id, event->event);
		else
			ib_cm_notify(ch->ib_cm.cm_id, event->event);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		pr_debug("%s-%d, state %s: received Last WQE event.\n",
			 ch->sess_name, ch->qp->qp_num,
			 get_ch_state_name(ch->state));
		break;
	default:
		pr_err("received unrecognized IB QP event %d\n", event->event);
		break;
	}
}
238
239
240
241
242
243
244
245
246
247
248static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
249{
250 u16 id;
251 u8 tmp;
252
253 id = (slot - 1) / 2;
254 if (slot & 0x1) {
255 tmp = c_list[id] & 0xf;
256 c_list[id] = (value << 4) | tmp;
257 } else {
258 tmp = c_list[id] & 0xf0;
259 c_list[id] = (value & 0xf) | tmp;
260 }
261}
262
263
264
265
266
267
268
269
/**
 * srpt_get_class_port_info - copy ClassPortInfo to a management datagram
 * @mad: Datagram that will be sent as response to DM_ATTR_CLASS_PORT_INFO.
 *
 * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
 * Specification.
 */
static void srpt_get_class_port_info(struct ib_dm_mad *mad)
{
	struct ib_class_port_info *cif;

	cif = (struct ib_class_port_info *)mad->data;
	memset(cif, 0, sizeof(*cif));
	cif->base_version = 1;
	cif->class_version = 1;

	ib_set_cpi_resp_time(cif, 20);
	mad->mad_hdr.status = 0;
}
282
283
284
285
286
287
288
289
290static void srpt_get_iou(struct ib_dm_mad *mad)
291{
292 struct ib_dm_iou_info *ioui;
293 u8 slot;
294 int i;
295
296 ioui = (struct ib_dm_iou_info *)mad->data;
297 ioui->change_id = cpu_to_be16(1);
298 ioui->max_controllers = 16;
299
300
301 srpt_set_ioc(ioui->controller_list, 1, 1);
302 for (i = 1, slot = 2; i < 16; i++, slot++)
303 srpt_set_ioc(ioui->controller_list, slot, 0);
304
305 mad->mad_hdr.status = 0;
306}
307
308
309
310
311
312
313
314
315
316
317
/**
 * srpt_get_ioc - write IOControllerprofile to a management datagram
 * @sport: HCA port through which the MAD has been received.
 * @slot: Slot number specified in the DM_ATTR_IOC_PROFILE query.
 * @mad: Datagram that will be sent as response to DM_ATTR_IOC_PROFILE.
 *
 * Only slot 1 hosts an I/O controller; slot values outside 1..16 yield an
 * "invalid field" status and 3..16 yield "no IOC".
 */
static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
			 struct ib_dm_mad *mad)
{
	struct srpt_device *sdev = sport->sdev;
	struct ib_dm_ioc_profile *iocp;
	int send_queue_depth;

	iocp = (struct ib_dm_ioc_profile *)mad->data;

	if (!slot || slot > 16) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
		return;
	}

	if (slot > 2) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
		return;
	}

	/* Queue depth depends on whether a shared receive queue is in use. */
	if (sdev->use_srq)
		send_queue_depth = sdev->srq_size;
	else
		send_queue_depth = min(MAX_SRPT_RQ_SIZE,
				       sdev->device->attrs.max_qp_wr);

	memset(iocp, 0, sizeof(*iocp));
	strcpy(iocp->id_string, SRPT_ID_STRING);
	iocp->guid = cpu_to_be64(srpt_service_guid);
	iocp->vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
	iocp->device_id = cpu_to_be32(sdev->device->attrs.vendor_part_id);
	iocp->device_version = cpu_to_be16(sdev->device->attrs.hw_ver);
	iocp->subsys_vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
	iocp->subsys_device_id = 0x0;
	iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
	iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
	iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
	iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
	iocp->send_queue_depth = cpu_to_be16(send_queue_depth);
	iocp->rdma_read_depth = 4;
	iocp->send_size = cpu_to_be32(srp_max_req_size);
	/* Advertised RDMA size is capped at 16 MiB (1 << 24). */
	iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
					  1U << 24));
	iocp->num_svc_entries = 1;
	iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
		SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;

	mad->mad_hdr.status = 0;
}
368
369
370
371
372
373
374
375
376
377
378
379
/**
 * srpt_get_svc_entries - write ServiceEntries to a management datagram
 * @ioc_guid: I/O controller GUID embedded in the service entry / name.
 * @slot: Slot number specified in the DM_ATTR_SVC_ENTRIES query.
 * @hi: Highest entry index requested.
 * @lo: Lowest entry index requested.
 * @mad: Datagram that will be sent as response to DM_ATTR_SVC_ENTRIES.
 *
 * A single service entry (index 0) is reported; requests outside that
 * range get a "no IOC" or "invalid field" status.
 */
static void srpt_get_svc_entries(u64 ioc_guid,
				 u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
{
	struct ib_dm_svc_entries *svc_entries;

	WARN_ON(!ioc_guid);

	if (!slot || slot > 16) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
		return;
	}

	if (slot > 2 || lo > hi || hi > 1) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
		return;
	}

	svc_entries = (struct ib_dm_svc_entries *)mad->data;
	memset(svc_entries, 0, sizeof(*svc_entries));
	svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
	snprintf(svc_entries->service_entries[0].name,
		 sizeof(svc_entries->service_entries[0].name),
		 "%s%016llx",
		 SRP_SERVICE_NAME_PREFIX,
		 ioc_guid);

	mad->mad_hdr.status = 0;
}
410
411
412
413
414
415
416
/**
 * srpt_mgmt_method_get - process a received device management datagram
 * @sp: HCA port through which the MAD has been received.
 * @rq_mad: received MAD.
 * @rsp_mad: response MAD to be filled in.
 *
 * Dispatches on the requested device management attribute; unsupported
 * attributes are answered with DM_MAD_STATUS_UNSUP_METHOD_ATTR.
 */
static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
				 struct ib_dm_mad *rsp_mad)
{
	u16 attr_id;
	u32 slot;
	u8 hi, lo;

	attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
	switch (attr_id) {
	case DM_ATTR_CLASS_PORT_INFO:
		srpt_get_class_port_info(rsp_mad);
		break;
	case DM_ATTR_IOU_INFO:
		srpt_get_iou(rsp_mad);
		break;
	case DM_ATTR_IOC_PROFILE:
		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
		srpt_get_ioc(sp, slot, rsp_mad);
		break;
	case DM_ATTR_SVC_ENTRIES:
		/* attr_mod packs slot (high 16 bits), hi and lo (low 16 bits). */
		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
		hi = (u8) ((slot >> 8) & 0xff);
		lo = (u8) (slot & 0xff);
		slot = (u16) ((slot >> 16) & 0xffff);
		srpt_get_svc_entries(srpt_service_guid,
				     slot, hi, lo, rsp_mad);
		break;
	default:
		rsp_mad->mad_hdr.status =
			cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
		break;
	}
}
450
451
452
453
454
455
/**
 * srpt_mad_send_handler - MAD send completion callback
 * @mad_agent: Agent for which this callback function has been registered.
 * @mad_wc: Work completion reporting that the response MAD has been sent.
 *
 * Releases the address handle and the send buffer allocated in
 * srpt_mad_recv_handler().
 */
static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_send_wc *mad_wc)
{
	rdma_destroy_ah(mad_wc->send_buf->ah, RDMA_DESTROY_AH_SLEEPABLE);
	ib_free_send_mad(mad_wc->send_buf);
}
462
463
464
465
466
467
468
/**
 * srpt_mad_recv_handler - MAD reception callback function
 * @mad_agent: Agent for which this callback function has been registered.
 * @send_buf: Not used.
 * @mad_wc: Work completion reporting that a MAD has been received.
 *
 * Builds and posts a response MAD. On the success path (ib_post_send_mad()
 * returning 0) the AH and send buffer are released later by
 * srpt_mad_send_handler(); on failure the code falls through the labels
 * below to free them here. The received MAD is freed on every path.
 */
static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_send_buf *send_buf,
				  struct ib_mad_recv_wc *mad_wc)
{
	struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
	struct ib_ah *ah;
	struct ib_mad_send_buf *rsp;
	struct ib_dm_mad *dm_mad;

	if (!mad_wc || !mad_wc->recv_buf.mad)
		return;

	ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
				  mad_wc->recv_buf.grh, mad_agent->port_num);
	if (IS_ERR(ah))
		goto err;

	BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);

	rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
				 mad_wc->wc->pkey_index, 0,
				 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
				 GFP_KERNEL,
				 IB_MGMT_BASE_VERSION);
	if (IS_ERR(rsp))
		goto err_rsp;

	rsp->ah = ah;

	dm_mad = rsp->mad;
	memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof(*dm_mad));
	dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	dm_mad->mad_hdr.status = 0;

	switch (mad_wc->recv_buf.mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
		break;
	case IB_MGMT_METHOD_SET:
		dm_mad->mad_hdr.status =
			cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
		break;
	default:
		dm_mad->mad_hdr.status =
			cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
		break;
	}

	if (!ib_post_send_mad(rsp, NULL)) {
		ib_free_recv_mad(mad_wc);
		/* The send handler owns rsp and ah from here on. */
		return;
	}

	ib_free_send_mad(rsp);

err_rsp:
	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
err:
	ib_free_recv_mad(mad_wc);
}
530
531static int srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
532{
533 const __be16 *g = (const __be16 *)guid;
534
535 return snprintf(buf, size, "%04x:%04x:%04x:%04x",
536 be16_to_cpu(g[0]), be16_to_cpu(g[1]),
537 be16_to_cpu(g[2]), be16_to_cpu(g[3]));
538}
539
540
541
542
543
544
545
546
547
548
549
550static int srpt_refresh_port(struct srpt_port *sport)
551{
552 struct ib_mad_reg_req reg_req;
553 struct ib_port_modify port_modify;
554 struct ib_port_attr port_attr;
555 int ret;
556
557 ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
558 if (ret)
559 return ret;
560
561 sport->sm_lid = port_attr.sm_lid;
562 sport->lid = port_attr.lid;
563
564 ret = rdma_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
565 if (ret)
566 return ret;
567
568 sport->port_guid_id.wwn.priv = sport;
569 srpt_format_guid(sport->port_guid_id.name,
570 sizeof(sport->port_guid_id.name),
571 &sport->gid.global.interface_id);
572 sport->port_gid_id.wwn.priv = sport;
573 snprintf(sport->port_gid_id.name, sizeof(sport->port_gid_id.name),
574 "0x%016llx%016llx",
575 be64_to_cpu(sport->gid.global.subnet_prefix),
576 be64_to_cpu(sport->gid.global.interface_id));
577
578 if (rdma_protocol_iwarp(sport->sdev->device, sport->port))
579 return 0;
580
581 memset(&port_modify, 0, sizeof(port_modify));
582 port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
583 port_modify.clr_port_cap_mask = 0;
584
585 ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
586 if (ret) {
587 pr_warn("%s-%d: enabling device management failed (%d). Note: this is expected if SR-IOV is enabled.\n",
588 dev_name(&sport->sdev->device->dev), sport->port, ret);
589 return 0;
590 }
591
592 if (!sport->mad_agent) {
593 memset(®_req, 0, sizeof(reg_req));
594 reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
595 reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
596 set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
597 set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
598
599 sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
600 sport->port,
601 IB_QPT_GSI,
602 ®_req, 0,
603 srpt_mad_send_handler,
604 srpt_mad_recv_handler,
605 sport, 0);
606 if (IS_ERR(sport->mad_agent)) {
607 pr_err("%s-%d: MAD agent registration failed (%ld). Note: this is expected if SR-IOV is enabled.\n",
608 dev_name(&sport->sdev->device->dev), sport->port,
609 PTR_ERR(sport->mad_agent));
610 sport->mad_agent = NULL;
611 memset(&port_modify, 0, sizeof(port_modify));
612 port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
613 ib_modify_port(sport->sdev->device, sport->port, 0,
614 &port_modify);
615
616 }
617 }
618
619 return 0;
620}
621
622
623
624
625
626
627
/**
 * srpt_unregister_mad_agent - unregister MAD callback functions
 * @sdev: SRPT HCA pointer.
 *
 * Clears the device management capability bit and unregisters the MAD agent
 * for every port that has one.
 */
static void srpt_unregister_mad_agent(struct srpt_device *sdev)
{
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
	};
	struct srpt_port *sport;
	int i;

	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
		sport = &sdev->port[i - 1];
		WARN_ON(sport->port != i);
		if (sport->mad_agent) {
			ib_modify_port(sdev->device, i, 0, &port_modify);
			ib_unregister_mad_agent(sport->mad_agent);
			sport->mad_agent = NULL;
		}
	}
}
646
647
648
649
650
651
652
653
/**
 * srpt_alloc_ioctx - allocate a SRPT I/O context structure
 * @sdev: SRPT HCA pointer.
 * @ioctx_size: I/O context size (struct srpt_recv_ioctx or srpt_send_ioctx).
 * @buf_cache: I/O buffer cache; its object size determines the DMA mapping
 *	length.
 * @dir: DMA data direction.
 *
 * Returns the new context with a DMA-mapped buffer attached, or NULL on
 * allocation or mapping failure (all partial allocations are undone).
 */
static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
					   int ioctx_size,
					   struct kmem_cache *buf_cache,
					   enum dma_data_direction dir)
{
	struct srpt_ioctx *ioctx;

	ioctx = kzalloc(ioctx_size, GFP_KERNEL);
	if (!ioctx)
		goto err;

	ioctx->buf = kmem_cache_alloc(buf_cache, GFP_KERNEL);
	if (!ioctx->buf)
		goto err_free_ioctx;

	ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf,
				       kmem_cache_size(buf_cache), dir);
	if (ib_dma_mapping_error(sdev->device, ioctx->dma))
		goto err_free_buf;

	return ioctx;

err_free_buf:
	kmem_cache_free(buf_cache, ioctx->buf);
err_free_ioctx:
	kfree(ioctx);
err:
	return NULL;
}
683
684
685
686
687
688
689
690
691static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
692 struct kmem_cache *buf_cache,
693 enum dma_data_direction dir)
694{
695 if (!ioctx)
696 return;
697
698 ib_dma_unmap_single(sdev->device, ioctx->dma,
699 kmem_cache_size(buf_cache), dir);
700 kmem_cache_free(buf_cache, ioctx->buf);
701 kfree(ioctx);
702}
703
704
705
706
707
708
709
710
711
712
713
714static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
715 int ring_size, int ioctx_size,
716 struct kmem_cache *buf_cache,
717 int alignment_offset,
718 enum dma_data_direction dir)
719{
720 struct srpt_ioctx **ring;
721 int i;
722
723 WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx) &&
724 ioctx_size != sizeof(struct srpt_send_ioctx));
725
726 ring = kvmalloc_array(ring_size, sizeof(ring[0]), GFP_KERNEL);
727 if (!ring)
728 goto out;
729 for (i = 0; i < ring_size; ++i) {
730 ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, buf_cache, dir);
731 if (!ring[i])
732 goto err;
733 ring[i]->index = i;
734 ring[i]->offset = alignment_offset;
735 }
736 goto out;
737
738err:
739 while (--i >= 0)
740 srpt_free_ioctx(sdev, ring[i], buf_cache, dir);
741 kvfree(ring);
742 ring = NULL;
743out:
744 return ring;
745}
746
747
748
749
750
751
752
753
754
755static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
756 struct srpt_device *sdev, int ring_size,
757 struct kmem_cache *buf_cache,
758 enum dma_data_direction dir)
759{
760 int i;
761
762 if (!ioctx_ring)
763 return;
764
765 for (i = 0; i < ring_size; ++i)
766 srpt_free_ioctx(sdev, ioctx_ring[i], buf_cache, dir);
767 kvfree(ioctx_ring);
768}
769
770
771
772
773
774
775
776
777
/**
 * srpt_set_cmd_state - set the state of a SCSI command
 * @ioctx: Send I/O context.
 * @new: New I/O context state.
 *
 * Does not modify the state of I/O contexts already in the DONE state.
 * NOTE(review): this read-modify-write is not atomic; presumably callers
 * serialize state changes per ioctx - confirm against callers.
 *
 * Returns the previous command state.
 */
static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
						  enum srpt_command_state new)
{
	enum srpt_command_state previous;

	previous = ioctx->state;
	if (previous != SRPT_STATE_DONE)
		ioctx->state = new;

	return previous;
}
789
790
791
792
793
794
795
796
797
/**
 * srpt_test_and_set_cmd_state - test and set the state of a command
 * @ioctx: Send I/O context.
 * @old: Expected current I/O context state.
 * @new: New I/O context state.
 *
 * Changes the state only if the current state equals @old. Like
 * srpt_set_cmd_state(), the compare-and-set is not atomic; presumably
 * callers serialize state changes per ioctx - confirm against callers.
 *
 * Returns true if and only if the state transition was performed.
 */
static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
					enum srpt_command_state old,
					enum srpt_command_state new)
{
	enum srpt_command_state previous;

	WARN_ON(!ioctx);
	WARN_ON(old == SRPT_STATE_DONE);
	WARN_ON(new == SRPT_STATE_NEW);

	previous = ioctx->state;
	if (previous == old)
		ioctx->state = new;

	return previous == old;
}
814
815
816
817
818
819
820
/**
 * srpt_post_recv - post an IB receive request
 * @sdev: SRPT HCA pointer.
 * @ch: SRPT RDMA channel.
 * @ioctx: Receive I/O context.
 *
 * Posts the receive buffer either on the shared receive queue (if the
 * device uses one) or on the channel's own QP.
 *
 * Returns the ib_post_srq_recv()/ib_post_recv() return value.
 */
static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
			  struct srpt_recv_ioctx *ioctx)
{
	struct ib_sge list;
	struct ib_recv_wr wr;

	BUG_ON(!sdev);
	list.addr = ioctx->ioctx.dma + ioctx->ioctx.offset;
	list.length = srp_max_req_size;
	list.lkey = sdev->lkey;

	ioctx->ioctx.cqe.done = srpt_recv_done;
	wr.wr_cqe = &ioctx->ioctx.cqe;
	wr.next = NULL;
	wr.sg_list = &list;
	wr.num_sge = 1;

	if (sdev->use_srq)
		return ib_post_srq_recv(sdev->srq, &wr, NULL);
	else
		return ib_post_recv(ch->qp, &wr, NULL);
}
843
844
845
846
847
848
849
850
851
852
/**
 * srpt_zerolength_write - perform a zero-length RDMA write
 * @ch: SRPT RDMA channel.
 *
 * Posts a signaled zero-length RDMA write; its completion is handled by
 * srpt_zerolength_write_done() via ch->zw_cqe.
 *
 * Returns the ib_post_send() return value.
 */
static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
{
	/* The unnamed initializer below targets an anonymous union member. */
	struct ib_rdma_wr wr = {
		.wr = {
			.next = NULL,
			{ .wr_cqe = &ch->zw_cqe, },
			.opcode = IB_WR_RDMA_WRITE,
			.send_flags = IB_SEND_SIGNALED,
		}
	};

	pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
		 ch->qp->qp_num);

	return ib_post_send(ch->qp, &wr.wr, NULL);
}
869
/*
 * Completion handler for the zero-length write posted by
 * srpt_zerolength_write(). On success the wait list is processed; on failure
 * the channel is moved to the disconnected state and, if that transition
 * succeeded, release work is scheduled.
 */
static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = wc->qp->qp_context;

	pr_debug("%s-%d wc->status %d\n", ch->sess_name, ch->qp->qp_num,
		 wc->status);

	if (wc->status == IB_WC_SUCCESS) {
		srpt_process_wait_list(ch);
	} else {
		if (srpt_set_ch_state(ch, CH_DISCONNECTED))
			schedule_work(&ch->release_work);
		else
			pr_debug("%s-%d: already disconnected.\n",
				 ch->sess_name, ch->qp->qp_num);
	}
}
887
/*
 * srpt_alloc_rw_ctxs - allocate and initialize RDMA R/W contexts
 * @ioctx: send I/O context the contexts belong to.
 * @db: array of @nbufs SRP direct data buffer descriptors.
 * @nbufs: number of descriptors.
 * @sg: output - head of the chained scatterlist covering all contexts.
 * @sg_cnt: output - total number of scatterlist entries (accumulated).
 *
 * Allocates an SG list per descriptor, registers it for RDMA and chains the
 * SG lists together. On failure every context set up by THIS call is torn
 * down (the loop starts at ioctx->n_rw_ctx, so previously existing contexts
 * are left alone) and a negative errno is returned.
 *
 * Note: prev_nents is only read when prev != NULL, i.e. after it has been
 * assigned at the bottom of a prior loop iteration.
 */
static int srpt_alloc_rw_ctxs(struct srpt_send_ioctx *ioctx,
	struct srp_direct_buf *db, int nbufs, struct scatterlist **sg,
	unsigned *sg_cnt)
{
	enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
	struct srpt_rdma_ch *ch = ioctx->ch;
	struct scatterlist *prev = NULL;
	unsigned prev_nents;
	int ret, i;

	/* Use the embedded single context when only one buffer is needed. */
	if (nbufs == 1) {
		ioctx->rw_ctxs = &ioctx->s_rw_ctx;
	} else {
		ioctx->rw_ctxs = kmalloc_array(nbufs, sizeof(*ioctx->rw_ctxs),
			GFP_KERNEL);
		if (!ioctx->rw_ctxs)
			return -ENOMEM;
	}

	for (i = ioctx->n_rw_ctx; i < nbufs; i++, db++) {
		struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
		u64 remote_addr = be64_to_cpu(db->va);
		u32 size = be32_to_cpu(db->len);
		u32 rkey = be32_to_cpu(db->key);

		ret = target_alloc_sgl(&ctx->sg, &ctx->nents, size, false,
				i < nbufs - 1);
		if (ret)
			goto unwind;

		ret = rdma_rw_ctx_init(&ctx->rw, ch->qp, ch->sport->port,
				ctx->sg, ctx->nents, 0, remote_addr, rkey, dir);
		if (ret < 0) {
			target_free_sgl(ctx->sg, ctx->nents);
			goto unwind;
		}

		/* rdma_rw_ctx_init() returns the number of WQEs needed. */
		ioctx->n_rdma += ret;
		ioctx->n_rw_ctx++;

		/* Chain this context's SG list onto the previous one. */
		if (prev) {
			sg_unmark_end(&prev[prev_nents - 1]);
			sg_chain(prev, prev_nents + 1, ctx->sg);
		} else {
			*sg = ctx->sg;
		}

		prev = ctx->sg;
		prev_nents = ctx->nents;

		*sg_cnt += ctx->nents;
	}

	return 0;

unwind:
	while (--i >= 0) {
		struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];

		rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
				ctx->sg, ctx->nents, dir);
		target_free_sgl(ctx->sg, ctx->nents);
	}
	if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
		kfree(ioctx->rw_ctxs);
	return ret;
}
955
956static void srpt_free_rw_ctxs(struct srpt_rdma_ch *ch,
957 struct srpt_send_ioctx *ioctx)
958{
959 enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
960 int i;
961
962 for (i = 0; i < ioctx->n_rw_ctx; i++) {
963 struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
964
965 rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
966 ctx->sg, ctx->nents, dir);
967 target_free_sgl(ctx->sg, ctx->nents);
968 }
969
970 if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
971 kfree(ioctx->rw_ctxs);
972}
973
/* Return a pointer to the data descriptors that follow the CDB in @srp_cmd. */
static inline void *srpt_get_desc_buf(struct srp_cmd *srp_cmd)
{
	/*
	 * The pointer computation below is only correct if
	 * srp_cmd::add_data is declared as a byte array (s8[]/u8[] or
	 * s8*/u8*), so verify that at compile time.
	 */
	BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0) &&
		     !__same_type(srp_cmd->add_data[0], (u8)0));

	/*
	 * The low two bits of the additional-CDB-length field are masked
	 * off before using it as a byte offset past the additional CDB.
	 * NOTE(review): presumably per the SRP spec these bits are
	 * reserved - confirm against the SRP specification.
	 */
	return srp_cmd->add_data + (srp_cmd->add_cdb_len & ~3);
}
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
/*
 * srpt_get_desc_tbl - parse the data descriptors of a SRP_CMD request
 * @recv_ioctx: I/O context associated with the received command @srp_cmd.
 * @ioctx: I/O context that will be used for responding to the initiator.
 * @srp_cmd: Pointer to the SRP_CMD request data.
 * @dir: Pointer to the variable to which the transfer direction will be
 *	written.
 * @sg: scatterlist (to be) allocated for the SRP command.
 * @sg_cnt: number of entries in @sg.
 * @data_len: Pointer to the variable to which the total data length of all
 *	descriptors in the SRP_CMD request will be written.
 * @imm_data_offset: offset of the immediate data (if any) inside the
 *	received request.
 *
 * Returns 0 upon success and a negative errno upon failure.
 */
static int srpt_get_desc_tbl(struct srpt_recv_ioctx *recv_ioctx,
		struct srpt_send_ioctx *ioctx,
		struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
		struct scatterlist **sg, unsigned int *sg_cnt, u64 *data_len,
		u16 imm_data_offset)
{
	BUG_ON(!dir);
	BUG_ON(!data_len);

	/*
	 * The lower four bits of the buffer format field describe the data-in
	 * buffers, the upper four bits the data-out buffers.
	 */
	if (srp_cmd->buf_fmt & 0xf)
		/* DATA-IN: transfer data from target to initiator (read). */
		*dir = DMA_FROM_DEVICE;
	else if (srp_cmd->buf_fmt >> 4)
		/* DATA-OUT: transfer data from initiator to target (write). */
		*dir = DMA_TO_DEVICE;
	else
		*dir = DMA_NONE;

	/* initialize data_direction early as srpt_alloc_rw_ctxs needs it */
	ioctx->cmd.data_direction = *dir;

	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
		struct srp_direct_buf *db = srpt_get_desc_buf(srp_cmd);

		*data_len = be32_to_cpu(db->len);
		return srpt_alloc_rw_ctxs(ioctx, db, 1, sg, sg_cnt);
	} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
		   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
		struct srp_indirect_buf *idb = srpt_get_desc_buf(srp_cmd);
		int nbufs = be32_to_cpu(idb->table_desc.len) /
				sizeof(struct srp_direct_buf);

		if (nbufs >
		    (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
			pr_err("received unsupported SRP_CMD request type (%u out + %u in != %u / %zu)\n",
			       srp_cmd->data_out_desc_cnt,
			       srp_cmd->data_in_desc_cnt,
			       be32_to_cpu(idb->table_desc.len),
			       sizeof(struct srp_direct_buf));
			return -EINVAL;
		}

		*data_len = be32_to_cpu(idb->len);
		return srpt_alloc_rw_ctxs(ioctx, idb->desc_list, nbufs,
				sg, sg_cnt);
	} else if ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_IMM) {
		/* Immediate data: the payload follows inside the request IU. */
		struct srp_imm_buf *imm_buf = srpt_get_desc_buf(srp_cmd);
		void *data = (void *)srp_cmd + imm_data_offset;
		uint32_t len = be32_to_cpu(imm_buf->len);
		uint32_t req_size = imm_data_offset + len;

		if (req_size > srp_max_req_size) {
			pr_err("Immediate data (length %d + %d) exceeds request size %d\n",
			       imm_data_offset, len, srp_max_req_size);
			return -EINVAL;
		}
		if (recv_ioctx->byte_len < req_size) {
			pr_err("Received too few data - %d < %d\n",
			       recv_ioctx->byte_len, req_size);
			return -EIO;
		}
		/*
		 * The immediate data buffer descriptor must occur before the
		 * immediate data itself.
		 */
		if ((void *)(imm_buf + 1) > (void *)data) {
			pr_err("Received invalid write request\n");
			return -EINVAL;
		}
		*data_len = len;
		ioctx->recv_ioctx = recv_ioctx;
		if ((uintptr_t)data & 511) {
			pr_warn_once("Internal error - the receive buffers are not aligned properly.\n");
			return -EINVAL;
		}
		sg_init_one(&ioctx->imm_sg, data, len);
		*sg = &ioctx->imm_sg;
		*sg_cnt = 1;
		return 0;
	} else {
		*data_len = 0;
		return 0;
	}
}
1101
1102
1103
1104
1105
1106
1107
1108
1109
/**
 * srpt_init_ch_qp - initialize queue pair attributes
 * @ch: SRPT RDMA channel.
 * @qp: Queue pair pointer.
 *
 * Transitions the QP to the INIT state and sets the port, pkey index and
 * access flags. Only used for channels set up via the IB/CM (hence the
 * WARN_ON_ONCE on using_rdma_cm).
 *
 * Returns the ib_modify_qp() return value, or -ENOMEM on allocation failure.
 */
static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	WARN_ON_ONCE(ch->using_rdma_cm);

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	attr->port_num = ch->sport->port;

	ret = ib_find_cached_pkey(ch->sport->sdev->device, ch->sport->port,
				  ch->pkey, &attr->pkey_index);
	if (ret < 0)
		/* Non-fatal: fall back to pkey index 0 (from kzalloc). */
		pr_err("Translating pkey %#x failed (%d) - using index 0\n",
		       ch->pkey, ret);

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
			   IB_QP_PKEY_INDEX);

	kfree(attr);
	return ret;
}
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1151{
1152 struct ib_qp_attr qp_attr;
1153 int attr_mask;
1154 int ret;
1155
1156 WARN_ON_ONCE(ch->using_rdma_cm);
1157
1158 qp_attr.qp_state = IB_QPS_RTR;
1159 ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
1160 if (ret)
1161 goto out;
1162
1163 qp_attr.max_dest_rd_atomic = 4;
1164
1165 ret = ib_modify_qp(qp, &qp_attr, attr_mask);
1166
1167out:
1168 return ret;
1169}
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1183{
1184 struct ib_qp_attr qp_attr;
1185 int attr_mask;
1186 int ret;
1187
1188 qp_attr.qp_state = IB_QPS_RTS;
1189 ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
1190 if (ret)
1191 goto out;
1192
1193 qp_attr.max_rd_atomic = 4;
1194
1195 ret = ib_modify_qp(qp, &qp_attr, attr_mask);
1196
1197out:
1198 return ret;
1199}
1200
1201
1202
1203
1204
/**
 * srpt_ch_qp_err - set the channel queue pair state to 'error'
 * @ch: SRPT RDMA channel.
 *
 * Only qp_state is initialized; that is the only attribute ib_modify_qp()
 * consumes given the IB_QP_STATE-only attribute mask.
 */
static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
{
	struct ib_qp_attr qp_attr;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
}
1212
1213
1214
1215
1216
/**
 * srpt_get_send_ioctx - obtain an I/O context for sending to the initiator
 * @ch: SRPT RDMA channel.
 *
 * Allocates a tag from the session's tag pool, picks the corresponding
 * ioctx from the ring and resets its per-command state.
 *
 * Returns the I/O context, or NULL if no tag is available.
 */
static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
{
	struct srpt_send_ioctx *ioctx;
	int tag, cpu;

	BUG_ON(!ch);

	tag = sbitmap_queue_get(&ch->sess->sess_tag_pool, &cpu);
	if (tag < 0)
		return NULL;

	ioctx = ch->ioctx_ring[tag];
	BUG_ON(ioctx->ch != ch);
	ioctx->state = SRPT_STATE_NEW;
	WARN_ON_ONCE(ioctx->recv_ioctx);
	ioctx->n_rdma = 0;
	ioctx->n_rw_ctx = 0;
	ioctx->queue_status_only = false;
	/*
	 * transport_init_se_cmd() does not initialize all fields, so do it
	 * here.
	 */
	memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
	memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
	ioctx->cmd.map_tag = tag;
	ioctx->cmd.map_cpu = cpu;

	return ioctx;
}
1246
1247
1248
1249
1250
/**
 * srpt_abort_cmd - abort a SCSI command
 * @ioctx:   I/O context associated with the SCSI command.
 *
 * The first switch advances the I/O context state; the second performs the
 * state-dependent cleanup based on the state *before* the transition.
 *
 * Returns the state of the I/O context before the abort was processed.
 */
static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
{
	enum srpt_command_state state;

	BUG_ON(!ioctx);

	/*
	 * If the command is in a state where the target core is waiting for
	 * the ib_srpt driver, change the state to the next state.
	 */
	state = ioctx->state;
	switch (state) {
	case SRPT_STATE_NEED_DATA:
		ioctx->state = SRPT_STATE_DATA_IN;
		break;
	case SRPT_STATE_CMD_RSP_SENT:
	case SRPT_STATE_MGMT_RSP_SENT:
		ioctx->state = SRPT_STATE_DONE;
		break;
	default:
		WARN_ONCE(true, "%s: unexpected I/O context state %d\n",
			  __func__, state);
		break;
	}

	pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state,
		 ioctx->state, ioctx->cmd.tag);

	switch (state) {
	case SRPT_STATE_NEW:
	case SRPT_STATE_DATA_IN:
	case SRPT_STATE_MGMT:
	case SRPT_STATE_DONE:
		/*
		 * Do nothing - defer abort processing until
		 * srpt_queue_response() is invoked.
		 */
		break;
	case SRPT_STATE_NEED_DATA:
		/* A write is pending: fail the command. */
		pr_debug("tag %#llx: RDMA read error\n", ioctx->cmd.tag);
		transport_generic_request_failure(&ioctx->cmd,
					TCM_CHECK_CONDITION_ABORT_CMD);
		break;
	case SRPT_STATE_CMD_RSP_SENT:
		/*
		 * SRP_RSP sending failed or the SRP_RSP send completion has
		 * not been received in time.
		 */
		transport_generic_free_cmd(&ioctx->cmd, 0);
		break;
	case SRPT_STATE_MGMT_RSP_SENT:
		transport_generic_free_cmd(&ioctx->cmd, 0);
		break;
	default:
		WARN(1, "Unexpected command state (%d)", state);
		break;
	}

	return state;
}
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
/**
 * srpt_rdma_read_done - RDMA read completion callback
 * @cq: Completion queue.
 * @wc: Work completion.
 *
 * XXX: what is now the purpose of this handler still? The RDMA read for the
 * command's data-out has finished: return the send-queue WQE credits, then
 * either hand the command to the target core for execution (on success) or
 * abort it (on error).
 */
static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = wc->qp->qp_context;
	struct srpt_send_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);

	WARN_ON(ioctx->n_rdma <= 0);
	/* Return the WQE credits consumed by the RDMA read. */
	atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
	ioctx->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		pr_info("RDMA_READ for ioctx 0x%p failed with status %d\n",
			ioctx, wc->status);
		srpt_abort_cmd(ioctx);
		return;
	}

	if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
					SRPT_STATE_DATA_IN))
		target_execute_cmd(&ioctx->cmd);
	else
		pr_err("%s[%d]: wrong state = %d\n", __func__,
		       __LINE__, ioctx->state);
}
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
/**
 * srpt_build_cmd_rsp - build a SRP_RSP response
 * @ch: RDMA channel through which the request has been received.
 * @ioctx: I/O context associated with the SRP_CMD request. The response will
 *	be built in the buffer ioctx->buf points at and hence this function
 *	will overwrite the request data.
 * @tag: tag of the request for which this response is being generated.
 * @status: value for the STATUS field of the SRP_RSP information unit.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response. See also SPC-2 for more information about sense data.
 */
static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
			      struct srpt_send_ioctx *ioctx, u64 tag,
			      int status)
{
	struct se_cmd *cmd = &ioctx->cmd;
	struct srp_rsp *srp_rsp;
	const u8 *sense_data;
	int sense_data_len, max_sense_len;
	u32 resid = cmd->residual_count;

	/*
	 * The lowest bit of all SAM-3 status codes is zero (see also
	 * paragraph 5.3 in SAM-3).
	 */
	WARN_ON(status & 1);

	srp_rsp = ioctx->ioctx.buf;
	BUG_ON(!srp_rsp);

	sense_data = ioctx->sense_data;
	sense_data_len = ioctx->cmd.scsi_sense_length;
	WARN_ON(sense_data_len > sizeof(ioctx->sense_data));

	memset(srp_rsp, 0, sizeof(*srp_rsp));
	srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta =
		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
	srp_rsp->tag = tag;
	srp_rsp->status = status;

	if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		if (cmd->data_direction == DMA_TO_DEVICE) {
			/* residual data from an underflow write */
			srp_rsp->flags = SRP_RSP_FLAG_DOUNDER;
			srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
		} else if (cmd->data_direction == DMA_FROM_DEVICE) {
			/* residual data from an underflow read */
			srp_rsp->flags = SRP_RSP_FLAG_DIUNDER;
			srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
		}
	} else if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		if (cmd->data_direction == DMA_TO_DEVICE) {
			/* residual data from an overflow write */
			srp_rsp->flags = SRP_RSP_FLAG_DOOVER;
			srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
		} else if (cmd->data_direction == DMA_FROM_DEVICE) {
			/* residual data from an overflow read */
			srp_rsp->flags = SRP_RSP_FLAG_DIOVER;
			srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
		}
	}

	if (sense_data_len) {
		BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
		max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
		if (sense_data_len > max_sense_len) {
			pr_warn("truncated sense data from %d to %d bytes\n",
				sense_data_len, max_sense_len);
			sense_data_len = max_sense_len;
		}

		srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
		srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
		memcpy(srp_rsp + 1, sense_data, sense_data_len);
	}

	return sizeof(*srp_rsp) + sense_data_len;
}
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1446 struct srpt_send_ioctx *ioctx,
1447 u8 rsp_code, u64 tag)
1448{
1449 struct srp_rsp *srp_rsp;
1450 int resp_data_len;
1451 int resp_len;
1452
1453 resp_data_len = 4;
1454 resp_len = sizeof(*srp_rsp) + resp_data_len;
1455
1456 srp_rsp = ioctx->ioctx.buf;
1457 BUG_ON(!srp_rsp);
1458 memset(srp_rsp, 0, sizeof(*srp_rsp));
1459
1460 srp_rsp->opcode = SRP_RSP;
1461 srp_rsp->req_lim_delta =
1462 cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
1463 srp_rsp->tag = tag;
1464
1465 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
1466 srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
1467 srp_rsp->data[3] = rsp_code;
1468
1469 return resp_len;
1470}
1471
/*
 * Drop the session command reference held on behalf of the target core.
 * Note: container_of(cmd, struct srpt_send_ioctx, cmd) followed by taking
 * the address of its ->cmd member yields @cmd itself, so the reference can
 * be dropped on @cmd directly.
 */
static int srpt_check_stop_free(struct se_cmd *cmd)
{
	return target_put_sess_cmd(cmd);
}
1479
1480
1481
1482
1483
1484
1485
1486static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
1487 struct srpt_recv_ioctx *recv_ioctx,
1488 struct srpt_send_ioctx *send_ioctx)
1489{
1490 struct se_cmd *cmd;
1491 struct srp_cmd *srp_cmd;
1492 struct scatterlist *sg = NULL;
1493 unsigned sg_cnt = 0;
1494 u64 data_len;
1495 enum dma_data_direction dir;
1496 int rc;
1497
1498 BUG_ON(!send_ioctx);
1499
1500 srp_cmd = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
1501 cmd = &send_ioctx->cmd;
1502 cmd->tag = srp_cmd->tag;
1503
1504 switch (srp_cmd->task_attr) {
1505 case SRP_CMD_SIMPLE_Q:
1506 cmd->sam_task_attr = TCM_SIMPLE_TAG;
1507 break;
1508 case SRP_CMD_ORDERED_Q:
1509 default:
1510 cmd->sam_task_attr = TCM_ORDERED_TAG;
1511 break;
1512 case SRP_CMD_HEAD_OF_Q:
1513 cmd->sam_task_attr = TCM_HEAD_TAG;
1514 break;
1515 case SRP_CMD_ACA:
1516 cmd->sam_task_attr = TCM_ACA_TAG;
1517 break;
1518 }
1519
1520 rc = srpt_get_desc_tbl(recv_ioctx, send_ioctx, srp_cmd, &dir,
1521 &sg, &sg_cnt, &data_len, ch->imm_data_offset);
1522 if (rc) {
1523 if (rc != -EAGAIN) {
1524 pr_err("0x%llx: parsing SRP descriptor table failed.\n",
1525 srp_cmd->tag);
1526 }
1527 goto busy;
1528 }
1529
1530 rc = target_submit_cmd_map_sgls(cmd, ch->sess, srp_cmd->cdb,
1531 &send_ioctx->sense_data[0],
1532 scsilun_to_int(&srp_cmd->lun), data_len,
1533 TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF,
1534 sg, sg_cnt, NULL, 0, NULL, 0);
1535 if (rc != 0) {
1536 pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
1537 srp_cmd->tag);
1538 goto busy;
1539 }
1540 return;
1541
1542busy:
1543 target_send_busy(cmd);
1544}
1545
1546static int srp_tmr_to_tcm(int fn)
1547{
1548 switch (fn) {
1549 case SRP_TSK_ABORT_TASK:
1550 return TMR_ABORT_TASK;
1551 case SRP_TSK_ABORT_TASK_SET:
1552 return TMR_ABORT_TASK_SET;
1553 case SRP_TSK_CLEAR_TASK_SET:
1554 return TMR_CLEAR_TASK_SET;
1555 case SRP_TSK_LUN_RESET:
1556 return TMR_LUN_RESET;
1557 case SRP_TSK_CLEAR_ACA:
1558 return TMR_CLEAR_ACA;
1559 default:
1560 return -1;
1561 }
1562}
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
/**
 * srpt_handle_tsk_mgmt - process a SRP_TSK_MGMT information unit
 * @ch: SRPT RDMA channel.
 * @recv_ioctx: Receive I/O context holding the SRP_TSK_MGMT IU.
 * @send_ioctx: Send I/O context that will carry the response.
 *
 * For more information about SRP_TSK_MGMT information units, see also
 * section 6.7 in the SRP r16a document.
 */
static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
				 struct srpt_recv_ioctx *recv_ioctx,
				 struct srpt_send_ioctx *send_ioctx)
{
	struct srp_tsk_mgmt *srp_tsk;
	struct se_cmd *cmd;
	struct se_session *sess = ch->sess;
	int tcm_tmr;
	int rc;

	BUG_ON(!send_ioctx);

	srp_tsk = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
	cmd = &send_ioctx->cmd;

	pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld ch %p sess %p\n",
		 srp_tsk->tsk_mgmt_func, srp_tsk->task_tag, srp_tsk->tag, ch,
		 ch->sess);

	srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
	send_ioctx->cmd.tag = srp_tsk->tag;
	tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
	rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL,
			       scsilun_to_int(&srp_tsk->lun), srp_tsk, tcm_tmr,
			       GFP_KERNEL, srp_tsk->task_tag,
			       TARGET_SCF_ACK_KREF);
	if (rc != 0) {
		/* Submission failed: answer with TMR_FUNCTION_REJECTED. */
		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
		cmd->se_tfo->queue_tm_rsp(cmd);
	}
	return;
}
1607
1608
1609
1610
1611
1612
/**
 * srpt_handle_new_iu - process a newly received information unit
 * @ch: RDMA channel through which the information unit has been received.
 * @recv_ioctx: Receive I/O context associated with the information unit.
 *
 * Returns true if the IU has been processed and false if it has been
 * queued on the channel's command wait list instead (either because the
 * channel is still connecting or because no send context was available).
 */
static bool
srpt_handle_new_iu(struct srpt_rdma_ch *ch, struct srpt_recv_ioctx *recv_ioctx)
{
	struct srpt_send_ioctx *send_ioctx = NULL;
	struct srp_cmd *srp_cmd;
	bool res = false;
	u8 opcode;

	BUG_ON(!ch);
	BUG_ON(!recv_ioctx);

	/* Postpone processing until the channel has been established. */
	if (unlikely(ch->state == CH_CONNECTING))
		goto push;

	ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
				   recv_ioctx->ioctx.dma,
				   recv_ioctx->ioctx.offset + srp_max_req_size,
				   DMA_FROM_DEVICE);

	srp_cmd = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
	opcode = srp_cmd->opcode;
	/* Only SRP_CMD and SRP_TSK_MGMT IUs need a send context. */
	if (opcode == SRP_CMD || opcode == SRP_TSK_MGMT) {
		send_ioctx = srpt_get_send_ioctx(ch);
		if (unlikely(!send_ioctx))
			goto push;
	}

	/* Remove this IU from the wait list if it had been queued before. */
	if (!list_empty(&recv_ioctx->wait_list)) {
		WARN_ON_ONCE(!ch->processing_wait_list);
		list_del_init(&recv_ioctx->wait_list);
	}

	switch (opcode) {
	case SRP_CMD:
		srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
		break;
	case SRP_TSK_MGMT:
		srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
		break;
	case SRP_I_LOGOUT:
		pr_err("Not yet implemented: SRP_I_LOGOUT\n");
		break;
	case SRP_CRED_RSP:
		pr_debug("received SRP_CRED_RSP\n");
		break;
	case SRP_AER_RSP:
		pr_debug("received SRP_AER_RSP\n");
		break;
	case SRP_RSP:
		pr_err("Received SRP_RSP\n");
		break;
	default:
		pr_err("received IU with unknown opcode 0x%x\n", opcode);
		break;
	}

	/*
	 * Repost the receive buffer unless it is still referenced by the
	 * send context (immediate-data case).
	 */
	if (!send_ioctx || !send_ioctx->recv_ioctx)
		srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
	res = true;

out:
	return res;

push:
	if (list_empty(&recv_ioctx->wait_list)) {
		WARN_ON_ONCE(ch->processing_wait_list);
		list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
	}
	goto out;
}
1683
1684static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1685{
1686 struct srpt_rdma_ch *ch = wc->qp->qp_context;
1687 struct srpt_recv_ioctx *ioctx =
1688 container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe);
1689
1690 if (wc->status == IB_WC_SUCCESS) {
1691 int req_lim;
1692
1693 req_lim = atomic_dec_return(&ch->req_lim);
1694 if (unlikely(req_lim < 0))
1695 pr_err("req_lim = %d < 0\n", req_lim);
1696 ioctx->byte_len = wc->byte_len;
1697 srpt_handle_new_iu(ch, ioctx);
1698 } else {
1699 pr_info_ratelimited("receiving failed for ioctx %p with status %d\n",
1700 ioctx, wc->status);
1701 }
1702}
1703
1704
1705
1706
1707
1708
/*
 * Process IUs that were queued while the channel was still connecting or
 * while no send context was available. This function must be called from
 * the context in which RDMA completions are processed because it accesses
 * the wait list without protection against concurrent access from other
 * threads.
 */
static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
{
	struct srpt_recv_ioctx *recv_ioctx, *tmp;

	WARN_ON_ONCE(ch->state == CH_CONNECTING);

	if (list_empty(&ch->cmd_wait_list))
		return;

	/*
	 * processing_wait_list tells srpt_handle_new_iu() that wait-list
	 * entries may legitimately be removed while it runs.
	 */
	WARN_ON_ONCE(ch->processing_wait_list);
	ch->processing_wait_list = true;
	list_for_each_entry_safe(recv_ioctx, tmp, &ch->cmd_wait_list,
				 wait_list) {
		/* Stop as soon as an IU cannot be processed (pushed back). */
		if (!srpt_handle_new_iu(ch, recv_ioctx))
			break;
	}
	ch->processing_wait_list = false;
}
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
/*
 * Completion callback for send work requests that carry an SRP_RSP
 * response IU. Releases the send queue slots used by the response and by
 * any RDMA transfers associated with the command, frees the command and
 * resumes processing of the wait list.
 */
static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = wc->qp->qp_context;
	struct srpt_send_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);
	enum srpt_command_state state;

	state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);

	WARN_ON(state != SRPT_STATE_CMD_RSP_SENT &&
		state != SRPT_STATE_MGMT_RSP_SENT);

	/* One WR for the response plus one per RDMA context. */
	atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);

	if (wc->status != IB_WC_SUCCESS)
		pr_info("sending response for ioctx 0x%p failed with status %d\n",
			ioctx, wc->status);

	if (state != SRPT_STATE_DONE) {
		transport_generic_free_cmd(&ioctx->cmd, 0);
	} else {
		/* The command had already reached SRPT_STATE_DONE. */
		pr_err("IB completion has been received too late for wr_id = %u.\n",
		       ioctx->ioctx.index);
	}

	srpt_process_wait_list(ch);
}
1772
1773
1774
1775
1776
/**
 * srpt_create_ch_ib - create receive and send completion queues and a QP
 * @ch: SRPT RDMA channel.
 *
 * Allocates a CQ from the CQ pool and an RC QP for @ch. If QP creation
 * fails the allocation is retried with a smaller send queue size until
 * MIN_SRPT_SQ_SIZE is reached. Returns 0 on success or a negative errno.
 */
static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
{
	struct ib_qp_init_attr *qp_init;
	struct srpt_port *sport = ch->sport;
	struct srpt_device *sdev = sport->sdev;
	const struct ib_device_attr *attrs = &sdev->device->attrs;
	int sq_size = sport->port_attrib.srp_sq_size;
	int i, ret;

	WARN_ON(ch->rq_size < 1);

	ret = -ENOMEM;
	qp_init = kzalloc(sizeof(*qp_init), GFP_KERNEL);
	if (!qp_init)
		goto out;

retry:
	/* One CQ is shared by the receive and the send queue. */
	ch->cq = ib_cq_pool_get(sdev->device, ch->rq_size + sq_size, -1,
				IB_POLL_WORKQUEUE);
	if (IS_ERR(ch->cq)) {
		ret = PTR_ERR(ch->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       ch->rq_size + sq_size, ret);
		goto out;
	}
	ch->cq_size = ch->rq_size + sq_size;

	qp_init->qp_context = (void *)ch;
	qp_init->event_handler
		= (void(*)(struct ib_event *, void*))srpt_qp_event;
	qp_init->send_cq = ch->cq;
	qp_init->recv_cq = ch->cq;
	qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_init->qp_type = IB_QPT_RC;
	/*
	 * The send queue size is split into half SEND WRs for sending the
	 * responses and half R/W contexts for performing the actual RDMA
	 * READ/WRITE transfers (see also qp_init->cap.max_rdma_ctxs).
	 */
	qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
	qp_init->cap.max_rdma_ctxs = sq_size / 2;
	qp_init->cap.max_send_sge = attrs->max_send_sge;
	qp_init->cap.max_recv_sge = 1;
	qp_init->port_num = ch->sport->port;
	/* With an SRQ the receive queue is shared across channels. */
	if (sdev->use_srq)
		qp_init->srq = sdev->srq;
	else
		qp_init->cap.max_recv_wr = ch->rq_size;

	if (ch->using_rdma_cm) {
		ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init);
		ch->qp = ch->rdma_cm.cm_id->qp;
	} else {
		ch->qp = ib_create_qp(sdev->pd, qp_init);
		if (!IS_ERR(ch->qp)) {
			ret = srpt_init_ch_qp(ch, ch->qp);
			if (ret)
				ib_destroy_qp(ch->qp);
		} else {
			ret = PTR_ERR(ch->qp);
		}
	}
	if (ret) {
		bool retry = sq_size > MIN_SRPT_SQ_SIZE;

		if (retry) {
			/* Retry with a smaller send queue. */
			pr_debug("failed to create queue pair with sq_size = %d (%d) - retrying\n",
				 sq_size, ret);
			ib_cq_pool_put(ch->cq, ch->cq_size);
			sq_size = max(sq_size / 2, MIN_SRPT_SQ_SIZE);
			goto retry;
		} else {
			pr_err("failed to create queue pair with sq_size = %d (%d)\n",
			       sq_size, ret);
			goto err_destroy_cq;
		}
	}

	atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d ch= %p\n",
		 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
		 qp_init->cap.max_send_wr, ch);

	/* Without an SRQ the receive ring must be posted per channel. */
	if (!sdev->use_srq)
		for (i = 0; i < ch->rq_size; i++)
			srpt_post_recv(sdev, ch, ch->ioctx_recv_ring[i]);

out:
	kfree(qp_init);
	return ret;

err_destroy_cq:
	ch->qp = NULL;
	ib_cq_pool_put(ch->cq, ch->cq_size);
	goto out;
}
1876
/* Free the QP and CQ allocated by srpt_create_ch_ib(); order matters. */
static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
{
	ib_destroy_qp(ch->qp);
	ib_cq_pool_put(ch->cq, ch->cq_size);
}
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
/**
 * srpt_close_ch - close a channel
 * @ch: Channel to close.
 *
 * Moves @ch into the CH_DRAINING state, puts the QP into the error state
 * and posts a zero-length write whose completion triggers further channel
 * cleanup.
 *
 * Returns true if and only if the channel state has been changed into
 * CH_DRAINING by this call.
 */
static bool srpt_close_ch(struct srpt_rdma_ch *ch)
{
	int ret;

	if (!srpt_set_ch_state(ch, CH_DRAINING)) {
		pr_debug("%s: already closed\n", ch->sess_name);
		return false;
	}

	/* Keep the channel alive while this function is running. */
	kref_get(&ch->kref);

	ret = srpt_ch_qp_err(ch);
	if (ret < 0)
		pr_err("%s-%d: changing queue pair into error state failed: %d\n",
		       ch->sess_name, ch->qp->qp_num, ret);

	ret = srpt_zerolength_write(ch);
	if (ret < 0) {
		pr_err("%s-%d: queuing zero-length write failed: %d\n",
		       ch->sess_name, ch->qp->qp_num, ret);
		/*
		 * Posting the zero-length write failed, so its completion
		 * will never trigger the release work; schedule it here.
		 */
		if (srpt_set_ch_state(ch, CH_DISCONNECTED))
			schedule_work(&ch->release_work);
		else
			WARN_ON_ONCE(true);
	}

	kref_put(&ch->kref, srpt_free_ch);

	return true;
}
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
/**
 * srpt_disconnect_ch - initiate channel disconnection
 * @ch: Channel to disconnect.
 *
 * Sends a DREQ (IB/CM) or calls rdma_disconnect() (RDMA/CM). Returns
 * -ENOTCONN if the channel was not in a state from which it can be moved
 * into CH_DISCONNECTING, and 0 if disconnection has been initiated or the
 * channel has been closed directly.
 */
static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
{
	int ret;

	if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
		return -ENOTCONN;

	if (ch->using_rdma_cm) {
		ret = rdma_disconnect(ch->rdma_cm.cm_id);
	} else {
		ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0);
		/* If sending a DREQ failed, try sending a DREP instead. */
		if (ret < 0)
			ret = ib_send_cm_drep(ch->ib_cm.cm_id, NULL, 0);
	}

	/* CM-level disconnect failed: fall back to closing the channel. */
	if (ret < 0 && srpt_close_ch(ch))
		ret = 0;

	return ret;
}
1953
1954
/*
 * Disconnect @ch and wait until the channel release work has completed.
 * Logs a message every five seconds while waiting.
 */
static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
{
	DECLARE_COMPLETION_ONSTACK(closed);
	struct srpt_port *sport = ch->sport;

	pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
		 ch->state);

	/* Completed by srpt_release_channel_work(). */
	ch->closed = &closed;

	mutex_lock(&sport->mutex);
	srpt_disconnect_ch(ch);
	mutex_unlock(&sport->mutex);

	while (wait_for_completion_timeout(&closed, 5 * HZ) == 0)
		pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
			ch->sess_name, ch->qp->qp_num, ch->state);

}
1974
/*
 * Disconnect and close all channels of all nexuses associated with @sport.
 * The caller must hold sport->mutex.
 */
static void __srpt_close_all_ch(struct srpt_port *sport)
{
	struct srpt_nexus *nexus;
	struct srpt_rdma_ch *ch;

	lockdep_assert_held(&sport->mutex);

	list_for_each_entry(nexus, &sport->nexus_list, entry) {
		list_for_each_entry(ch, &nexus->ch_list, list) {
			if (srpt_disconnect_ch(ch) >= 0)
				pr_info("Closing channel %s-%d because target %s_%d has been disabled\n",
					ch->sess_name, ch->qp->qp_num,
					dev_name(&sport->sdev->device->dev),
					sport->port);
			srpt_close_ch(ch);
		}
	}
}
1993
1994
1995
1996
1997
/**
 * srpt_get_nexus - look up or allocate the I_T nexus for given port IDs
 * @sport: SRPT HCA port.
 * @i_port_id: 128-bit initiator port identifier.
 * @t_port_id: 128-bit target port identifier.
 *
 * Looks up the nexus under sport->mutex; on a miss, allocates one outside
 * the mutex and retries, so a concurrently inserted nexus is preferred
 * over the freshly allocated one (which is then freed).
 */
static struct srpt_nexus *srpt_get_nexus(struct srpt_port *sport,
					 const u8 i_port_id[16],
					 const u8 t_port_id[16])
{
	struct srpt_nexus *nexus = NULL, *tmp_nexus = NULL, *n;

	for (;;) {
		mutex_lock(&sport->mutex);
		list_for_each_entry(n, &sport->nexus_list, entry) {
			if (memcmp(n->i_port_id, i_port_id, 16) == 0 &&
			    memcmp(n->t_port_id, t_port_id, 16) == 0) {
				nexus = n;
				break;
			}
		}
		/* Not found: insert the nexus allocated in a prior pass. */
		if (!nexus && tmp_nexus) {
			list_add_tail_rcu(&tmp_nexus->entry,
					  &sport->nexus_list);
			swap(nexus, tmp_nexus);
		}
		mutex_unlock(&sport->mutex);

		if (nexus)
			break;
		tmp_nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
		if (!tmp_nexus) {
			nexus = ERR_PTR(-ENOMEM);
			break;
		}
		INIT_LIST_HEAD(&tmp_nexus->ch_list);
		memcpy(tmp_nexus->i_port_id, i_port_id, 16);
		memcpy(tmp_nexus->t_port_id, t_port_id, 16);
	}

	/* Free the allocation if it lost the race or was not needed. */
	kfree(tmp_nexus);

	return nexus;
}
2036
2037static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
2038 __must_hold(&sport->mutex)
2039{
2040 lockdep_assert_held(&sport->mutex);
2041
2042 if (sport->enabled == enabled)
2043 return;
2044 sport->enabled = enabled;
2045 if (!enabled)
2046 __srpt_close_all_ch(sport);
2047}
2048
/*
 * Drop one port reference; complete sport->freed_channels, if set, once the
 * last reference is gone so a waiter can proceed with port teardown.
 */
static void srpt_drop_sport_ref(struct srpt_port *sport)
{
	if (atomic_dec_return(&sport->refcount) == 0 && sport->freed_channels)
		complete(sport->freed_channels);
}
2054
/*
 * kref release callback: free a channel once its last reference is dropped.
 * kfree_rcu() is used because channel lists are modified with the
 * list_*_rcu() primitives elsewhere in this driver.
 */
static void srpt_free_ch(struct kref *kref)
{
	struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);

	srpt_drop_sport_ref(ch->sport);
	kfree_rcu(ch, rcu);
}
2062
2063
2064
2065
2066
2067
2068
2069
2070
/*
 * Release all resources associated with an RDMA channel: wait for and
 * remove the target session, destroy the CM identifier, unlink the channel
 * from its nexus, tear down the QP/CQ and free the I/O context rings.
 * Runs from ch->release_work after the channel has been drained.
 */
static void srpt_release_channel_work(struct work_struct *w)
{
	struct srpt_rdma_ch *ch;
	struct srpt_device *sdev;
	struct srpt_port *sport;
	struct se_session *se_sess;

	ch = container_of(w, struct srpt_rdma_ch, release_work);
	pr_debug("%s-%d\n", ch->sess_name, ch->qp->qp_num);

	sdev = ch->sport->sdev;
	BUG_ON(!sdev);

	se_sess = ch->sess;
	BUG_ON(!se_sess);

	/* Wait until all outstanding commands have finished. */
	target_sess_cmd_list_set_waiting(se_sess);
	target_wait_for_sess_cmds(se_sess);

	target_remove_session(se_sess);
	ch->sess = NULL;

	if (ch->using_rdma_cm)
		rdma_destroy_id(ch->rdma_cm.cm_id);
	else
		ib_destroy_cm_id(ch->ib_cm.cm_id);

	sport = ch->sport;
	mutex_lock(&sport->mutex);
	list_del_rcu(&ch->list);
	mutex_unlock(&sport->mutex);

	/* Wake up srpt_disconnect_ch_sync() if it is waiting. */
	if (ch->closed)
		complete(ch->closed);

	srpt_destroy_ch_ib(ch);

	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
			     ch->sport->sdev, ch->rq_size,
			     ch->rsp_buf_cache, DMA_TO_DEVICE);

	kmem_cache_destroy(ch->rsp_buf_cache);

	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
			     sdev, ch->rq_size,
			     ch->req_buf_cache, DMA_FROM_DEVICE);

	kmem_cache_destroy(ch->req_buf_cache);

	/* Drop the reference taken by kref_init() in srpt_cm_req_recv(). */
	kref_put(&ch->kref, srpt_free_ch);
}
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
/**
 * srpt_cm_req_recv - process a connection / SRP login request
 * @sdev: HCA through which the login request was received.
 * @ib_cm_id: IB/CM connection identifier (IB/CM case; NULL otherwise).
 * @rdma_cm_id: RDMA/CM connection identifier (RDMA/CM case; NULL otherwise).
 * @port_num: Port through which the REQ message was received.
 * @pkey: P_Key of the incoming connection.
 * @req: SRP login request.
 * @src_addr: GID (IB/CM) or IP address (RDMA/CM) of the port that submitted
 * the login request; used as the session name.
 *
 * Validates the login request, allocates and sets up a new RDMA channel
 * with its response and receive rings, creates a target session and either
 * accepts or rejects the connection. If this function returns zero,
 * ownership of the CM id has been transferred to the channel.
 */
static int srpt_cm_req_recv(struct srpt_device *const sdev,
			    struct ib_cm_id *ib_cm_id,
			    struct rdma_cm_id *rdma_cm_id,
			    u8 port_num, __be16 pkey,
			    const struct srp_login_req *req,
			    const char *src_addr)
{
	struct srpt_port *sport = &sdev->port[port_num - 1];
	struct srpt_nexus *nexus;
	struct srp_login_rsp *rsp = NULL;
	struct srp_login_rej *rej = NULL;
	union {
		struct rdma_conn_param rdma_cm;
		struct ib_cm_rep_param ib_cm;
	} *rep_param = NULL;
	struct srpt_rdma_ch *ch = NULL;
	char i_port_id[36];
	u32 it_iu_len;
	int i, tag_num, tag_size, ret;
	struct srpt_tpg *stpg;

	WARN_ON_ONCE(irqs_disabled());

	it_iu_len = be32_to_cpu(req->req_it_iu_len);

	pr_info("Received SRP_LOGIN_REQ with i_port_id %pI6, t_port_id %pI6 and it_iu_len %d on port %d (guid=%pI6); pkey %#04x\n",
		req->initiator_port_id, req->target_port_id, it_iu_len,
		port_num, &sport->gid, be16_to_cpu(pkey));

	nexus = srpt_get_nexus(sport, req->initiator_port_id,
			       req->target_port_id);
	if (IS_ERR(nexus)) {
		ret = PTR_ERR(nexus);
		goto out;
	}

	ret = -ENOMEM;
	rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
	rej = kzalloc(sizeof(*rej), GFP_KERNEL);
	rep_param = kzalloc(sizeof(*rep_param), GFP_KERNEL);
	if (!rsp || !rej || !rep_param)
		goto out;

	ret = -EINVAL;
	if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
		rej->reason = cpu_to_be32(
				SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
		pr_err("rejected SRP_LOGIN_REQ because its length (%d bytes) is out of range (%d .. %d)\n",
		       it_iu_len, 64, srp_max_req_size);
		goto reject;
	}

	if (!sport->enabled) {
		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_info("rejected SRP_LOGIN_REQ because target port %s_%d has not yet been enabled\n",
			dev_name(&sport->sdev->device->dev), port_num);
		goto reject;
	}

	/* Both halves of the target port ID must match the service GUID. */
	if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
	    || *(__be64 *)(req->target_port_id + 8) !=
	       cpu_to_be64(srpt_service_guid)) {
		rej->reason = cpu_to_be32(
				SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
		pr_err("rejected SRP_LOGIN_REQ because it has an invalid target port identifier.\n");
		goto reject;
	}

	ret = -ENOMEM;
	ch = kzalloc(sizeof(*ch), GFP_KERNEL);
	if (!ch) {
		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_err("rejected SRP_LOGIN_REQ because out of memory.\n");
		goto reject;
	}

	kref_init(&ch->kref);
	ch->pkey = be16_to_cpu(pkey);
	ch->nexus = nexus;
	ch->zw_cqe.done = srpt_zerolength_write_done;
	INIT_WORK(&ch->release_work, srpt_release_channel_work);
	ch->sport = sport;
	if (ib_cm_id) {
		ch->ib_cm.cm_id = ib_cm_id;
		ib_cm_id->context = ch;
	} else {
		ch->using_rdma_cm = true;
		ch->rdma_cm.cm_id = rdma_cm_id;
		rdma_cm_id->context = ch;
	}
	/*
	 * ch->rq_size should be at least as large as the initiator queue
	 * depth to avoid that the initiator driver has to report QUEUE_FULL
	 * to the SCSI mid-layer.
	 */
	ch->rq_size = min(MAX_SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
	spin_lock_init(&ch->spinlock);
	ch->state = CH_CONNECTING;
	INIT_LIST_HEAD(&ch->cmd_wait_list);
	ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;

	ch->rsp_buf_cache = kmem_cache_create("srpt-rsp-buf", ch->max_rsp_size,
					      512, 0, NULL);
	if (!ch->rsp_buf_cache)
		goto free_ch;

	ch->ioctx_ring = (struct srpt_send_ioctx **)
		srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
				      sizeof(*ch->ioctx_ring[0]),
				      ch->rsp_buf_cache, 0, DMA_TO_DEVICE);
	if (!ch->ioctx_ring) {
		pr_err("rejected SRP_LOGIN_REQ because creating a new QP SQ ring failed.\n");
		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		goto free_rsp_cache;
	}

	for (i = 0; i < ch->rq_size; i++)
		ch->ioctx_ring[i]->ch = ch;
	if (!sdev->use_srq) {
		/* Immediate-data support (SRP_IMMED_REQUESTED). */
		u16 imm_data_offset = req->req_flags & SRP_IMMED_REQUESTED ?
			be16_to_cpu(req->imm_data_offset) : 0;
		u16 alignment_offset;
		u32 req_sz;

		if (req->req_flags & SRP_IMMED_REQUESTED)
			pr_debug("imm_data_offset = %d\n",
				 be16_to_cpu(req->imm_data_offset));
		if (imm_data_offset >= sizeof(struct srp_cmd)) {
			ch->imm_data_offset = imm_data_offset;
			rsp->rsp_flags |= SRP_LOGIN_RSP_IMMED_SUPP;
		} else {
			ch->imm_data_offset = 0;
		}
		/* Align the start of the immediate data to 512 bytes. */
		alignment_offset = round_up(imm_data_offset, 512) -
			imm_data_offset;
		req_sz = alignment_offset + imm_data_offset + srp_max_req_size;
		ch->req_buf_cache = kmem_cache_create("srpt-req-buf", req_sz,
						      512, 0, NULL);
		if (!ch->req_buf_cache)
			goto free_rsp_ring;

		ch->ioctx_recv_ring = (struct srpt_recv_ioctx **)
			srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
					      sizeof(*ch->ioctx_recv_ring[0]),
					      ch->req_buf_cache,
					      alignment_offset,
					      DMA_FROM_DEVICE);
		if (!ch->ioctx_recv_ring) {
			pr_err("rejected SRP_LOGIN_REQ because creating a new QP RQ ring failed.\n");
			rej->reason =
			    cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
			goto free_recv_cache;
		}
		for (i = 0; i < ch->rq_size; i++)
			INIT_LIST_HEAD(&ch->ioctx_recv_ring[i]->wait_list);
	}

	ret = srpt_create_ch_ib(ch);
	if (ret) {
		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_err("rejected SRP_LOGIN_REQ because creating a new RDMA channel failed.\n");
		goto free_recv_ring;
	}

	strlcpy(ch->sess_name, src_addr, sizeof(ch->sess_name));
	snprintf(i_port_id, sizeof(i_port_id), "0x%016llx%016llx",
			be64_to_cpu(*(__be64 *)nexus->i_port_id),
			be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8)));

	pr_debug("registering src addr %s or i_port_id %s\n", ch->sess_name,
		 i_port_id);

	tag_num = ch->rq_size;
	tag_size = 1;

	/* Try the session name derived from the source address first ... */
	mutex_lock(&sport->port_guid_id.mutex);
	list_for_each_entry(stpg, &sport->port_guid_id.tpg_list, entry) {
		if (!IS_ERR_OR_NULL(ch->sess))
			break;
		ch->sess = target_setup_session(&stpg->tpg, tag_num,
						tag_size, TARGET_PROT_NORMAL,
						ch->sess_name, ch, NULL);
	}
	mutex_unlock(&sport->port_guid_id.mutex);

	/* ... then the initiator port ID, with and without "0x" prefix. */
	mutex_lock(&sport->port_gid_id.mutex);
	list_for_each_entry(stpg, &sport->port_gid_id.tpg_list, entry) {
		if (!IS_ERR_OR_NULL(ch->sess))
			break;
		ch->sess = target_setup_session(&stpg->tpg, tag_num,
					tag_size, TARGET_PROT_NORMAL, i_port_id,
					ch, NULL);
		if (!IS_ERR_OR_NULL(ch->sess))
			break;
		/* Retry without leading "0x" */
		ch->sess = target_setup_session(&stpg->tpg, tag_num,
						tag_size, TARGET_PROT_NORMAL,
						i_port_id + 2, ch, NULL);
	}
	mutex_unlock(&sport->port_gid_id.mutex);

	if (IS_ERR_OR_NULL(ch->sess)) {
		WARN_ON_ONCE(ch->sess == NULL);
		ret = PTR_ERR(ch->sess);
		ch->sess = NULL;
		pr_info("Rejected login for initiator %s: ret = %d.\n",
			ch->sess_name, ret);
		rej->reason = cpu_to_be32(ret == -ENOMEM ?
				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
				SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
		goto destroy_ib;
	}

	/*
	 * Channel destruction drops this port reference again - see
	 * srpt_free_ch() / srpt_drop_sport_ref().
	 */
	atomic_inc(&sport->refcount);

	mutex_lock(&sport->mutex);

	/* SRP_MULTICHAN_SINGLE: terminate existing channels of this nexus. */
	if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
		struct srpt_rdma_ch *ch2;

		list_for_each_entry(ch2, &nexus->ch_list, list) {
			if (srpt_disconnect_ch(ch2) < 0)
				continue;
			pr_info("Relogin - closed existing channel %s\n",
				ch2->sess_name);
			rsp->rsp_flags |= SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
		}
	} else {
		rsp->rsp_flags |= SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
	}

	list_add_tail_rcu(&ch->list, &nexus->ch_list);

	/* Re-check under the mutex: the port may just have been disabled. */
	if (!sport->enabled) {
		rej->reason = cpu_to_be32(
				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
			dev_name(&sdev->device->dev), port_num);
		mutex_unlock(&sport->mutex);
		goto reject;
	}

	mutex_unlock(&sport->mutex);

	/* RDMA/CM transitions the QP itself; only needed for IB/CM. */
	ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rtr(ch, ch->qp);
	if (ret) {
		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_err("rejected SRP_LOGIN_REQ because enabling RTR failed (error code = %d)\n",
		       ret);
		goto reject;
	}

	pr_debug("Establish connection sess=%p name=%s ch=%p\n", ch->sess,
		 ch->sess_name, ch);

	/* Create the SRP login response. */
	rsp->opcode = SRP_LOGIN_RSP;
	rsp->tag = req->tag;
	rsp->max_it_iu_len = cpu_to_be32(srp_max_req_size);
	rsp->max_ti_iu_len = req->req_it_iu_len;
	ch->max_ti_iu_len = it_iu_len;
	rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
				   SRP_BUF_FORMAT_INDIRECT);
	rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
	atomic_set(&ch->req_lim, ch->rq_size);
	atomic_set(&ch->req_lim_delta, 0);

	/* Create the CM reply. */
	if (ch->using_rdma_cm) {
		rep_param->rdma_cm.private_data = (void *)rsp;
		rep_param->rdma_cm.private_data_len = sizeof(*rsp);
		rep_param->rdma_cm.rnr_retry_count = 7;
		rep_param->rdma_cm.flow_control = 1;
		rep_param->rdma_cm.responder_resources = 4;
		rep_param->rdma_cm.initiator_depth = 4;
	} else {
		rep_param->ib_cm.qp_num = ch->qp->qp_num;
		rep_param->ib_cm.private_data = (void *)rsp;
		rep_param->ib_cm.private_data_len = sizeof(*rsp);
		rep_param->ib_cm.rnr_retry_count = 7;
		rep_param->ib_cm.flow_control = 1;
		rep_param->ib_cm.failover_accepted = 0;
		rep_param->ib_cm.srq = 1;
		rep_param->ib_cm.responder_resources = 4;
		rep_param->ib_cm.initiator_depth = 4;
	}

	/*
	 * Hold the sport mutex while accepting the connection so that the
	 * enabled/state check and the accept happen atomically with respect
	 * to srpt_set_enabled() and srpt_disconnect_ch().
	 */
	mutex_lock(&sport->mutex);
	if (sport->enabled && ch->state == CH_CONNECTING) {
		if (ch->using_rdma_cm)
			ret = rdma_accept(rdma_cm_id, &rep_param->rdma_cm);
		else
			ret = ib_send_cm_rep(ib_cm_id, &rep_param->ib_cm);
	} else {
		ret = -EINVAL;
	}
	mutex_unlock(&sport->mutex);

	switch (ret) {
	case 0:
		break;
	case -EINVAL:
		goto reject;
	default:
		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_err("sending SRP_LOGIN_REQ response failed (error code = %d)\n",
		       ret);
		goto reject;
	}

	goto out;

destroy_ib:
	srpt_destroy_ch_ib(ch);

free_recv_ring:
	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
			     ch->sport->sdev, ch->rq_size,
			     ch->req_buf_cache, DMA_FROM_DEVICE);

free_recv_cache:
	kmem_cache_destroy(ch->req_buf_cache);

free_rsp_ring:
	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
			     ch->sport->sdev, ch->rq_size,
			     ch->rsp_buf_cache, DMA_TO_DEVICE);

free_rsp_cache:
	kmem_cache_destroy(ch->rsp_buf_cache);

free_ch:
	if (rdma_cm_id)
		rdma_cm_id->context = NULL;
	else
		ib_cm_id->context = NULL;
	kfree(ch);
	ch = NULL;

	WARN_ON_ONCE(ret == 0);

reject:
	pr_info("Rejecting login with reason %#x\n", be32_to_cpu(rej->reason));
	rej->opcode = SRP_LOGIN_REJ;
	rej->tag = req->tag;
	rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
				   SRP_BUF_FORMAT_INDIRECT);

	if (rdma_cm_id)
		rdma_reject(rdma_cm_id, rej, sizeof(*rej),
			    IB_CM_REJ_CONSUMER_DEFINED);
	else
		ib_send_cm_rej(ib_cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
			       rej, sizeof(*rej));

	if (ch && ch->sess) {
		srpt_close_ch(ch);
		/*
		 * A session has been created: report success so the caller
		 * does not destroy the CM id - srpt_release_channel_work()
		 * will do that.
		 */
		ret = 0;
	}

out:
	kfree(rep_param);
	kfree(rsp);
	kfree(rej);

	return ret;
}
2516
/* Process an IB/CM connection request (IB_CM_REQ_RECEIVED). */
static int srpt_ib_cm_req_recv(struct ib_cm_id *cm_id,
			       const struct ib_cm_req_event_param *param,
			       void *private_data)
{
	char sguid[40];

	/* Derive the session name from the initiator GUID. */
	srpt_format_guid(sguid, sizeof(sguid),
			 &param->primary_path->dgid.global.interface_id);

	return srpt_cm_req_recv(cm_id->context, cm_id, NULL, param->port,
				param->primary_path->pkey,
				private_data, sguid);
}
2530
/* Process an RDMA/CM connection request (RDMA_CM_EVENT_CONNECT_REQUEST). */
static int srpt_rdma_cm_req_recv(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event)
{
	struct srpt_device *sdev;
	struct srp_login_req req;
	const struct srp_login_req_rdma *req_rdma;
	struct sa_path_rec *path_rec = cm_id->route.path_rec;
	char src_addr[40];

	sdev = ib_get_client_data(cm_id->device, &srpt_client);
	if (!sdev)
		return -ECONNREFUSED;

	if (event->param.conn.private_data_len < sizeof(*req_rdma))
		return -EINVAL;

	/* Transform srp_login_req_rdma into srp_login_req. */
	req_rdma = event->param.conn.private_data;
	memset(&req, 0, sizeof(req));
	req.opcode = req_rdma->opcode;
	req.tag = req_rdma->tag;
	req.req_it_iu_len = req_rdma->req_it_iu_len;
	req.req_buf_fmt = req_rdma->req_buf_fmt;
	req.req_flags = req_rdma->req_flags;
	memcpy(req.initiator_port_id, req_rdma->initiator_port_id, 16);
	memcpy(req.target_port_id, req_rdma->target_port_id, 16);
	req.imm_data_offset = req_rdma->imm_data_offset;

	/* Use the initiator IP address as the session name. */
	snprintf(src_addr, sizeof(src_addr), "%pIS",
		 &cm_id->route.addr.src_addr);

	return srpt_cm_req_recv(sdev, NULL, cm_id, cm_id->port_num,
				path_rec ? path_rec->pkey : 0, &req, src_addr);
}
2565
2566static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
2567 enum ib_cm_rej_reason reason,
2568 const u8 *private_data,
2569 u8 private_data_len)
2570{
2571 char *priv = NULL;
2572 int i;
2573
2574 if (private_data_len && (priv = kmalloc(private_data_len * 3 + 1,
2575 GFP_KERNEL))) {
2576 for (i = 0; i < private_data_len; i++)
2577 sprintf(priv + 3 * i, " %02x", private_data[i]);
2578 }
2579 pr_info("Received CM REJ for ch %s-%d; reason %d%s%s.\n",
2580 ch->sess_name, ch->qp->qp_num, reason, private_data_len ?
2581 "; private data" : "", priv ? priv : " (?)");
2582 kfree(priv);
2583}
2584
2585
2586
2587
2588
2589
2590
2591
/**
 * srpt_cm_rtu_recv - process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event
 * @ch: SRPT RDMA channel.
 */
static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
{
	int ret;

	/* RDMA/CM transitions the QP itself; only needed for IB/CM. */
	ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rts(ch, ch->qp);
	if (ret < 0) {
		pr_err("%s-%d: QP transition to RTS failed\n", ch->sess_name,
		       ch->qp->qp_num);
		srpt_close_ch(ch);
		return;
	}

	/*
	 * Note: calling srpt_close_ch() if the transition to the LIVE state
	 * fails is not necessary since that means that that function has
	 * already been invoked from another thread.
	 */
	if (!srpt_set_ch_state(ch, CH_LIVE)) {
		pr_err("%s-%d: channel transition to LIVE state failed\n",
		       ch->sess_name, ch->qp->qp_num);
		return;
	}

	/* Trigger wait list processing. */
	ret = srpt_zerolength_write(ch);
	WARN_ONCE(ret < 0, "%d\n", ret);
}
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
/*
 * srpt_cm_handler - IB connection manager callback function
 *
 * A non-zero return value will cause the caller to destroy the CM ID. Note
 * that this handler must only return a non-zero value when transferring
 * ownership of the cm_id to a channel in srpt_cm_req_recv() failed;
 * returning non-zero in any other case would race with the CM id
 * destruction performed during channel release.
 */
static int srpt_cm_handler(struct ib_cm_id *cm_id,
			   const struct ib_cm_event *event)
{
	struct srpt_rdma_ch *ch = cm_id->context;
	int ret;

	ret = 0;
	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		ret = srpt_ib_cm_req_recv(cm_id, &event->param.req_rcvd,
					  event->private_data);
		break;
	case IB_CM_REJ_RECEIVED:
		srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason,
				 event->private_data,
				 IB_CM_REJ_PRIVATE_DATA_SIZE);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		srpt_cm_rtu_recv(ch);
		break;
	case IB_CM_DREQ_RECEIVED:
		srpt_disconnect_ch(ch);
		break;
	case IB_CM_DREP_RECEIVED:
		pr_info("Received CM DREP message for ch %s-%d.\n",
			ch->sess_name, ch->qp->qp_num);
		srpt_close_ch(ch);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		pr_info("Received CM TimeWait exit for ch %s-%d.\n",
			ch->sess_name, ch->qp->qp_num);
		srpt_close_ch(ch);
		break;
	case IB_CM_REP_ERROR:
		pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
			ch->qp->qp_num);
		break;
	case IB_CM_DREQ_ERROR:
		pr_info("Received CM DREQ ERROR event.\n");
		break;
	case IB_CM_MRA_RECEIVED:
		pr_info("Received CM MRA event\n");
		break;
	default:
		pr_err("received unrecognized CM event %d\n", event->event);
		break;
	}

	return ret;
}
2683
/**
 * srpt_rdma_cm_handler - RDMA/CM callback function
 * @cm_id: RDMA/CM connection identifier.
 * @event: RDMA/CM event.
 *
 * Mirrors srpt_cm_handler() for RDMA/CM-managed connections; only the
 * CONNECT_REQUEST path can produce a non-zero return value.
 */
static int srpt_rdma_cm_handler(struct rdma_cm_id *cm_id,
				struct rdma_cm_event *event)
{
	struct srpt_rdma_ch *ch = cm_id->context;
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = srpt_rdma_cm_req_recv(cm_id, event);
		break;
	case RDMA_CM_EVENT_REJECTED:
		srpt_cm_rej_recv(ch, event->status,
				 event->param.conn.private_data,
				 event->param.conn.private_data_len);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		srpt_cm_rtu_recv(ch);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		/* Start a graceful disconnect unless one is already ongoing. */
		if (ch->state < CH_DISCONNECTING)
			srpt_disconnect_ch(ch);
		else
			srpt_close_ch(ch);
		break;
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		srpt_close_ch(ch);
		break;
	case RDMA_CM_EVENT_UNREACHABLE:
		pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
			ch->qp->qp_num);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_ADDR_CHANGE:
		break;
	default:
		pr_err("received unrecognized RDMA CM event %d\n",
		       event->event);
		break;
	}

	return ret;
}
2726
2727
2728
2729
/*
 * srpt_write_pending - Start data transfer from initiator to target (write).
 *
 * For commands whose data arrived as SRP immediate data no RDMA reads are
 * needed; execution is started directly. Otherwise send-queue slots are
 * reserved for the RDMA read work requests, the per-context WR chains are
 * linked together and posted as a single chain with one completion (the
 * CQE is attached only to the first rw context processed).
 */
static int srpt_write_pending(struct se_cmd *se_cmd)
{
	struct srpt_send_ioctx *ioctx =
		container_of(se_cmd, struct srpt_send_ioctx, cmd);
	struct srpt_rdma_ch *ch = ioctx->ch;
	struct ib_send_wr *first_wr = NULL;
	struct ib_cqe *cqe = &ioctx->rdma_cqe;
	enum srpt_command_state new_state;
	int ret, i;

	if (ioctx->recv_ioctx) {
		/* Data was received as immediate data - no RDMA needed. */
		srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
		target_execute_cmd(&ioctx->cmd);
		return 0;
	}

	new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
	WARN_ON(new_state == SRPT_STATE_DONE);

	if (atomic_sub_return(ioctx->n_rdma, &ch->sq_wr_avail) < 0) {
		pr_warn("%s: IB send queue full (needed %d)\n",
			__func__, ioctx->n_rdma);
		ret = -ENOMEM;
		goto out_undo;
	}

	cqe->done = srpt_rdma_read_done;
	for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
		struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];

		first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp, ch->sport->port,
				cqe, first_wr);
		/* Only the first context gets a completion for the chain. */
		cqe = NULL;
	}

	ret = ib_post_send(ch->qp, first_wr, NULL);
	if (ret) {
		pr_err("%s: ib_post_send() returned %d for %d (avail: %d)\n",
			 __func__, ret, ioctx->n_rdma,
			 atomic_read(&ch->sq_wr_avail));
		goto out_undo;
	}

	return 0;
out_undo:
	/* Give back the reserved send-queue slots on failure. */
	atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
	return ret;
}
2778
2779static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
2780{
2781 switch (tcm_mgmt_status) {
2782 case TMR_FUNCTION_COMPLETE:
2783 return SRP_TSK_MGMT_SUCCESS;
2784 case TMR_FUNCTION_REJECTED:
2785 return SRP_TSK_MGMT_FUNC_NOT_SUPP;
2786 }
2787 return SRP_TSK_MGMT_FAILED;
2788}
2789
2790
2791
2792
2793
2794
2795
2796
/*
 * srpt_queue_response - transmit the response to a SCSI command or TMF
 * @cmd: SCSI target command.
 *
 * Callback function called by the TCM core. Must not block since it can be
 * invoked from the context of the IB completion handler. Builds either a
 * command response or a task management response, chains any pending RDMA
 * write WRs (target-to-initiator data) in front of the SEND and posts the
 * whole chain as one ib_post_send() call.
 */
static void srpt_queue_response(struct se_cmd *cmd)
{
	struct srpt_send_ioctx *ioctx =
		container_of(cmd, struct srpt_send_ioctx, cmd);
	struct srpt_rdma_ch *ch = ioctx->ch;
	struct srpt_device *sdev = ch->sport->sdev;
	struct ib_send_wr send_wr, *first_wr = &send_wr;
	struct ib_sge sge;
	enum srpt_command_state state;
	int resp_len, ret, i;
	u8 srp_tm_status;

	state = ioctx->state;
	switch (state) {
	case SRPT_STATE_NEW:
	case SRPT_STATE_DATA_IN:
		ioctx->state = SRPT_STATE_CMD_RSP_SENT;
		break;
	case SRPT_STATE_MGMT:
		ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
		break;
	default:
		WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
			ch, ioctx->ioctx.index, ioctx->state);
		break;
	}

	/* A response for this command has already been sent. */
	if (WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))
		return;

	/* For read commands, prepend the RDMA write WRs to the response. */
	if (ioctx->cmd.data_direction == DMA_FROM_DEVICE &&
	    ioctx->cmd.data_length &&
	    !ioctx->queue_status_only) {
		for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
			struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];

			first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp,
					ch->sport->port, NULL, first_wr);
		}
	}

	if (state != SRPT_STATE_MGMT)
		resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag,
					      cmd->scsi_status);
	else {
		srp_tm_status
			= tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
		resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
						 ioctx->cmd.tag);
	}

	atomic_inc(&ch->req_lim);

	/* One slot for the SEND plus one per RDMA work request. */
	if (unlikely(atomic_sub_return(1 + ioctx->n_rdma,
			&ch->sq_wr_avail) < 0)) {
		pr_warn("%s: IB send queue full (needed %d)\n",
			__func__, ioctx->n_rdma);
		ret = -ENOMEM;
		goto out;
	}

	ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, resp_len,
				      DMA_TO_DEVICE);

	sge.addr = ioctx->ioctx.dma;
	sge.length = resp_len;
	sge.lkey = sdev->lkey;

	ioctx->ioctx.cqe.done = srpt_send_done;
	send_wr.next = NULL;
	send_wr.wr_cqe = &ioctx->ioctx.cqe;
	send_wr.sg_list = &sge;
	send_wr.num_sge = 1;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(ch->qp, first_wr, NULL);
	if (ret < 0) {
		pr_err("%s: sending cmd response failed for tag %llu (%d)\n",
			__func__, ioctx->cmd.tag, ret);
		goto out;
	}

	return;

out:
	/* Undo the credit/slot reservations and drop the command. */
	atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
	atomic_dec(&ch->req_lim);
	srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
	target_put_sess_cmd(&ioctx->cmd);
}
2889
/* TCM callback: queue read data plus the response towards the initiator. */
static int srpt_queue_data_in(struct se_cmd *cmd)
{
	srpt_queue_response(cmd);
	return 0;
}
2895
/* TCM callback: queue a task management response towards the initiator. */
static void srpt_queue_tm_rsp(struct se_cmd *cmd)
{
	srpt_queue_response(cmd);
}
2900
2901
2902
2903
2904
2905
2906
/*
 * TCM callback invoked for an aborted command. No response is sent, so only
 * the request limit delta is incremented to return the credit.
 */
static void srpt_aborted_task(struct se_cmd *cmd)
{
	struct srpt_send_ioctx *ioctx = container_of(cmd,
				struct srpt_send_ioctx, cmd);
	struct srpt_rdma_ch *ch = ioctx->ch;

	atomic_inc(&ch->req_lim_delta);
}
2915
/* TCM callback: send only the SCSI status (and sense data) back. */
static int srpt_queue_status(struct se_cmd *cmd)
{
	struct srpt_send_ioctx *ioctx;

	ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
	BUG_ON(ioctx->sense_data != cmd->sense_buffer);
	/* Sense data implies CHECK CONDITION status. */
	if (cmd->se_cmd_flags &
	    (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
		WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
	ioctx->queue_status_only = true;
	srpt_queue_response(cmd);
	return 0;
}
2929
/* Work item that refreshes the information for one HCA port. */
static void srpt_refresh_port_work(struct work_struct *work)
{
	struct srpt_port *sport = container_of(work, struct srpt_port, work);

	srpt_refresh_port(sport);
}
2936
2937
2938
2939
2940
/*
 * srpt_release_sport - disable login and wait for associated channels
 * @sport: SRPT HCA port.
 *
 * Disables the port, then waits (in 5 s intervals, logging the still-alive
 * channels each time) until all sessions referencing the port have been
 * unregistered, and finally frees the nexus list. May sleep; must not be
 * called with interrupts disabled.
 */
static int srpt_release_sport(struct srpt_port *sport)
{
	DECLARE_COMPLETION_ONSTACK(c);
	struct srpt_nexus *nexus, *next_n;
	struct srpt_rdma_ch *ch;

	WARN_ON_ONCE(irqs_disabled());

	sport->freed_channels = &c;

	mutex_lock(&sport->mutex);
	srpt_set_enabled(sport, false);
	mutex_unlock(&sport->mutex);

	while (atomic_read(&sport->refcount) > 0 &&
	       wait_for_completion_timeout(&c, 5 * HZ) <= 0) {
		pr_info("%s_%d: waiting for unregistration of %d sessions ...\n",
			dev_name(&sport->sdev->device->dev), sport->port,
			atomic_read(&sport->refcount));
		rcu_read_lock();
		list_for_each_entry(nexus, &sport->nexus_list, entry) {
			list_for_each_entry(ch, &nexus->ch_list, list) {
				pr_info("%s-%d: state %s\n",
					ch->sess_name, ch->qp->qp_num,
					get_ch_state_name(ch->state));
			}
		}
		rcu_read_unlock();
	}

	mutex_lock(&sport->mutex);
	list_for_each_entry_safe(nexus, next_n, &sport->nexus_list, entry) {
		list_del(&nexus->entry);
		kfree_rcu(nexus, rcu);
	}
	mutex_unlock(&sport->mutex);

	return 0;
}
2980
2981static struct se_wwn *__srpt_lookup_wwn(const char *name)
2982{
2983 struct ib_device *dev;
2984 struct srpt_device *sdev;
2985 struct srpt_port *sport;
2986 int i;
2987
2988 list_for_each_entry(sdev, &srpt_dev_list, list) {
2989 dev = sdev->device;
2990 if (!dev)
2991 continue;
2992
2993 for (i = 0; i < dev->phys_port_cnt; i++) {
2994 sport = &sdev->port[i];
2995
2996 if (strcmp(sport->port_guid_id.name, name) == 0)
2997 return &sport->port_guid_id.wwn;
2998 if (strcmp(sport->port_gid_id.name, name) == 0)
2999 return &sport->port_gid_id.wwn;
3000 }
3001 }
3002
3003 return NULL;
3004}
3005
/* Locked wrapper around __srpt_lookup_wwn(). */
static struct se_wwn *srpt_lookup_wwn(const char *name)
{
	struct se_wwn *wwn;

	spin_lock(&srpt_dev_lock);
	wwn = __srpt_lookup_wwn(name);
	spin_unlock(&srpt_dev_lock);

	return wwn;
}
3016
3017static void srpt_free_srq(struct srpt_device *sdev)
3018{
3019 if (!sdev->srq)
3020 return;
3021
3022 ib_destroy_srq(sdev->srq);
3023 srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
3024 sdev->srq_size, sdev->req_buf_cache,
3025 DMA_FROM_DEVICE);
3026 kmem_cache_destroy(sdev->req_buf_cache);
3027 sdev->srq = NULL;
3028}
3029
/*
 * srpt_alloc_srq - allocate the shared receive queue for @sdev
 *
 * Creates the IB SRQ, a kmem cache for SRP request buffers, and a ring of
 * receive ioctx structures, and posts every ring element on the SRQ.
 * Returns 0 on success or a negative errno; on failure everything that was
 * set up so far is torn down again.
 */
static int srpt_alloc_srq(struct srpt_device *sdev)
{
	struct ib_srq_init_attr srq_attr = {
		.event_handler = srpt_srq_event,
		.srq_context = (void *)sdev,
		.attr.max_wr = sdev->srq_size,
		.attr.max_sge = 1,
		.srq_type = IB_SRQT_BASIC,
	};
	struct ib_device *device = sdev->device;
	struct ib_srq *srq;
	int i;

	WARN_ON_ONCE(sdev->srq);
	srq = ib_create_srq(sdev->pd, &srq_attr);
	if (IS_ERR(srq)) {
		pr_debug("ib_create_srq() failed: %ld\n", PTR_ERR(srq));
		return PTR_ERR(srq);
	}

	pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
		 sdev->device->attrs.max_srq_wr, dev_name(&device->dev));

	sdev->req_buf_cache = kmem_cache_create("srpt-srq-req-buf",
						srp_max_req_size, 0, 0, NULL);
	if (!sdev->req_buf_cache)
		goto free_srq;

	sdev->ioctx_ring = (struct srpt_recv_ioctx **)
		srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
				      sizeof(*sdev->ioctx_ring[0]),
				      sdev->req_buf_cache, 0, DMA_FROM_DEVICE);
	if (!sdev->ioctx_ring)
		goto free_cache;

	sdev->use_srq = true;
	sdev->srq = srq;

	/* Post all receive buffers before the SRQ is used. */
	for (i = 0; i < sdev->srq_size; ++i) {
		INIT_LIST_HEAD(&sdev->ioctx_ring[i]->wait_list);
		srpt_post_recv(sdev, NULL, sdev->ioctx_ring[i]);
	}

	return 0;

free_cache:
	kmem_cache_destroy(sdev->req_buf_cache);

free_srq:
	ib_destroy_srq(srq);
	return -ENOMEM;
}
3082
3083static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
3084{
3085 struct ib_device *device = sdev->device;
3086 int ret = 0;
3087
3088 if (!use_srq) {
3089 srpt_free_srq(sdev);
3090 sdev->use_srq = false;
3091 } else if (use_srq && !sdev->srq) {
3092 ret = srpt_alloc_srq(sdev);
3093 }
3094 pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__,
3095 dev_name(&device->dev), sdev->use_srq, ret);
3096 return ret;
3097}
3098
3099
3100
3101
3102
/**
 * srpt_add_one - InfiniBand device addition callback function
 * @device: Describes a HCA.
 *
 * Allocates and initializes the per-device state (PD, optional SRQ, IB/CM
 * listener when the first port uses the IB link layer, event handler and
 * one srpt_port per physical port) and links the device into srpt_dev_list.
 * Returns 0 on success or a negative errno, undoing all prior steps on
 * failure.
 */
static int srpt_add_one(struct ib_device *device)
{
	struct srpt_device *sdev;
	struct srpt_port *sport;
	int i, ret;

	pr_debug("device = %p\n", device);

	sdev = kzalloc(struct_size(sdev, port, device->phys_port_cnt),
		       GFP_KERNEL);
	if (!sdev)
		return -ENOMEM;

	sdev->device = device;
	mutex_init(&sdev->sdev_mutex);

	sdev->pd = ib_alloc_pd(device, 0);
	if (IS_ERR(sdev->pd)) {
		ret = PTR_ERR(sdev->pd);
		goto free_dev;
	}

	sdev->lkey = sdev->pd->local_dma_lkey;

	sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);

	/* SRQ allocation failure is not fatal: fall back to per-channel RQs. */
	srpt_use_srq(sdev, sdev->port[0].port_attrib.use_srq);

	if (!srpt_service_guid)
		srpt_service_guid = be64_to_cpu(device->node_guid);

	/* An IB/CM listener only makes sense on an InfiniBand link layer. */
	if (rdma_port_get_link_layer(device, 1) == IB_LINK_LAYER_INFINIBAND)
		sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
	if (IS_ERR(sdev->cm_id)) {
		pr_info("ib_create_cm_id() failed: %ld\n",
			PTR_ERR(sdev->cm_id));
		ret = PTR_ERR(sdev->cm_id);
		sdev->cm_id = NULL;
		/* Continue if logins are still possible via the RDMA/CM ID. */
		if (!rdma_cm_id)
			goto err_ring;
	}

	/* print out target login information */
	pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,pkey=ffff,service_id=%016llx\n",
		 srpt_service_guid, srpt_service_guid, srpt_service_guid);

	/*
	 * We do not have a consistent service_id (ie. also id_ext of target_id)
	 * to identify this target. We currently use the guid of the first HCA
	 * in the system as service_id; therefore, the target_id will change
	 * if this HCA is gone bad and replaced by different HCA.
	 */
	ret = sdev->cm_id ?
		ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0) :
		0;
	if (ret < 0) {
		pr_err("ib_cm_listen() failed: %d (cm_id state = %d)\n", ret,
		       sdev->cm_id->state);
		goto err_cm;
	}

	INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
			      srpt_event_handler);
	ib_register_event_handler(&sdev->event_handler);

	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
		sport = &sdev->port[i - 1];
		INIT_LIST_HEAD(&sport->nexus_list);
		mutex_init(&sport->mutex);
		sport->sdev = sdev;
		sport->port = i;
		sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
		sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
		sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
		sport->port_attrib.use_srq = false;
		INIT_WORK(&sport->work, srpt_refresh_port_work);
		mutex_init(&sport->port_guid_id.mutex);
		INIT_LIST_HEAD(&sport->port_guid_id.tpg_list);
		mutex_init(&sport->port_gid_id.mutex);
		INIT_LIST_HEAD(&sport->port_gid_id.tpg_list);

		ret = srpt_refresh_port(sport);
		if (ret) {
			pr_err("MAD registration failed for %s-%d.\n",
			       dev_name(&sdev->device->dev), i);
			goto err_event;
		}
	}

	spin_lock(&srpt_dev_lock);
	list_add_tail(&sdev->list, &srpt_dev_list);
	spin_unlock(&srpt_dev_lock);

	ib_set_client_data(device, &srpt_client, sdev);
	pr_debug("added %s.\n", dev_name(&device->dev));
	return 0;

err_event:
	ib_unregister_event_handler(&sdev->event_handler);
err_cm:
	if (sdev->cm_id)
		ib_destroy_cm_id(sdev->cm_id);
err_ring:
	srpt_free_srq(sdev);
	ib_dealloc_pd(sdev->pd);
free_dev:
	kfree(sdev);
	pr_info("%s(%s) failed.\n", __func__, dev_name(&device->dev));
	return ret;
}
3213
3214
3215
3216
3217
3218
/**
 * srpt_remove_one - InfiniBand device removal callback function
 * @device: Describes a HCA.
 * @client_data: The value passed as the third argument to ib_set_client_data().
 *
 * Unregisters MAD agents and event handlers, cancels pending port work,
 * destroys the CM listener, unlinks the device, releases every port (which
 * waits for all sessions to go away) and finally frees all device state.
 */
static void srpt_remove_one(struct ib_device *device, void *client_data)
{
	struct srpt_device *sdev = client_data;
	int i;

	srpt_unregister_mad_agent(sdev);

	ib_unregister_event_handler(&sdev->event_handler);

	/* Cancel any work queued by the just unregistered IB event handler. */
	for (i = 0; i < sdev->device->phys_port_cnt; i++)
		cancel_work_sync(&sdev->port[i].work);

	if (sdev->cm_id)
		ib_destroy_cm_id(sdev->cm_id);

	ib_set_client_data(device, &srpt_client, NULL);

	/*
	 * Unregistering a target must happen after destroying sdev->cm_id
	 * such that no new SRP_LOGIN_REQ information units can arrive while
	 * destroying the target.
	 */
	spin_lock(&srpt_dev_lock);
	list_del(&sdev->list);
	spin_unlock(&srpt_dev_lock);

	for (i = 0; i < sdev->device->phys_port_cnt; i++)
		srpt_release_sport(&sdev->port[i]);

	srpt_free_srq(sdev);

	ib_dealloc_pd(sdev->pd);

	kfree(sdev);
}
3255
/* IB client registered with the RDMA core; callbacks run once per HCA. */
static struct ib_client srpt_client = {
	.name = DRV_NAME,
	.add = srpt_add_one,
	.remove = srpt_remove_one
};
3261
/* TCM predicate callback that always reports "true". */
static int srpt_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}
3266
/* TCM predicate callback that always reports "false". */
static int srpt_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}
3271
/* Return the SRPT port associated with a target portal group. */
static struct srpt_port *srpt_tpg_to_sport(struct se_portal_group *tpg)
{
	return tpg->se_tpg_wwn->priv;
}
3276
3277static struct srpt_port_id *srpt_wwn_to_sport_id(struct se_wwn *wwn)
3278{
3279 struct srpt_port *sport = wwn->priv;
3280
3281 if (wwn == &sport->port_guid_id.wwn)
3282 return &sport->port_guid_id;
3283 if (wwn == &sport->port_gid_id.wwn)
3284 return &sport->port_gid_id;
3285 WARN_ON_ONCE(true);
3286 return NULL;
3287}
3288
/* TCM callback: return the port-id name backing this portal group. */
static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
{
	struct srpt_tpg *stpg = container_of(tpg, typeof(*stpg), tpg);

	return stpg->sport_id->name;
}
3295
/* TCM callback: srpt uses a single, fixed portal group tag. */
static u16 srpt_get_tag(struct se_portal_group *tpg)
{
	return 1;
}
3300
/* TCM callback: srpt uses a single, fixed TPG instance index. */
static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}
3305
/*
 * TCM callback invoked when the last reference on a command is dropped.
 * Recycles the receive ioctx (re-posting it as a receive buffer), frees any
 * RDMA read/write contexts and returns the tag to the session pool.
 */
static void srpt_release_cmd(struct se_cmd *se_cmd)
{
	struct srpt_send_ioctx *ioctx = container_of(se_cmd,
				struct srpt_send_ioctx, cmd);
	struct srpt_rdma_ch *ch = ioctx->ch;
	struct srpt_recv_ioctx *recv_ioctx = ioctx->recv_ioctx;

	WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE &&
		     !(ioctx->cmd.transport_state & CMD_T_ABORTED));

	if (recv_ioctx) {
		WARN_ON_ONCE(!list_empty(&recv_ioctx->wait_list));
		ioctx->recv_ioctx = NULL;
		srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
	}

	if (ioctx->n_rw_ctx) {
		srpt_free_rw_ctxs(ch, ioctx);
		ioctx->n_rw_ctx = 0;
	}

	target_free_tag(se_cmd->se_sess, se_cmd);
}
3329
3330
3331
3332
3333
3334
3335
3336
3337
/*
 * TCM callback: disconnect the channel backing this session and wait until
 * the disconnect has finished.
 */
static void srpt_close_session(struct se_session *se_sess)
{
	struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;

	srpt_disconnect_ch_sync(ch);
}
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
/* TCM callback: srpt does not distinguish sessions by index. */
static u32 srpt_sess_get_index(struct se_session *se_sess)
{
	return 0;
}
3358
/* TCM callback: srpt has no per-node default attributes to set. */
static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
{
}
3362
3363
3364static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
3365{
3366 struct srpt_send_ioctx *ioctx;
3367
3368 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
3369 return ioctx->state;
3370}
3371
3372static int srpt_parse_guid(u64 *guid, const char *name)
3373{
3374 u16 w[4];
3375 int ret = -EINVAL;
3376
3377 if (sscanf(name, "%hx:%hx:%hx:%hx", &w[0], &w[1], &w[2], &w[3]) != 4)
3378 goto out;
3379 *guid = get_unaligned_be64(w);
3380 ret = 0;
3381out:
3382 return ret;
3383}
3384
3385
3386
3387
3388
3389
3390static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
3391{
3392 const char *p;
3393 unsigned len, count, leading_zero_bytes;
3394 int ret;
3395
3396 p = name;
3397 if (strncasecmp(p, "0x", 2) == 0)
3398 p += 2;
3399 ret = -EINVAL;
3400 len = strlen(p);
3401 if (len % 2)
3402 goto out;
3403 count = min(len / 2, 16U);
3404 leading_zero_bytes = 16 - count;
3405 memset(i_port_id, 0, leading_zero_bytes);
3406 ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
3407
3408out:
3409 return ret;
3410}
3411
3412
3413
3414
3415
3416
3417
3418
3419
3420
3421
3422
3423
/*
 * configfs callback invoked for mkdir(2) in an ACL directory. Accepts an
 * initiator port name in GUID ("xxxx:xxxx:xxxx:xxxx"), 128-bit hex i_port_id
 * or IP-address form; only validates the syntax.
 */
static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
	struct sockaddr_storage sa;
	u64 guid;
	u8 i_port_id[16];
	int ret;

	ret = srpt_parse_guid(&guid, name);
	if (ret < 0)
		ret = srpt_parse_i_port_id(i_port_id, name);
	if (ret < 0)
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC, name, NULL,
					   &sa);
	if (ret < 0)
		pr_err("invalid initiator port ID %s\n", name);
	return ret;
}
3441
/* configfs: show the srp_max_rdma_size attribute of a TPG. */
static ssize_t srpt_tpg_attrib_srp_max_rdma_size_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);

	return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
}
3450
/*
 * configfs: store the srp_max_rdma_size attribute after range-checking it
 * against [DEFAULT_MAX_RDMA_SIZE, MAX_SRPT_RDMA_SIZE].
 */
static ssize_t srpt_tpg_attrib_srp_max_rdma_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0) {
		pr_err("kstrtoul() failed with ret: %d\n", ret);
		return -EINVAL;
	}
	if (val > MAX_SRPT_RDMA_SIZE) {
		pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
			MAX_SRPT_RDMA_SIZE);
		return -EINVAL;
	}
	if (val < DEFAULT_MAX_RDMA_SIZE) {
		pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
			val, DEFAULT_MAX_RDMA_SIZE);
		return -EINVAL;
	}
	sport->port_attrib.srp_max_rdma_size = val;

	return count;
}
3478
/* configfs: show the srp_max_rsp_size attribute of a TPG. */
static ssize_t srpt_tpg_attrib_srp_max_rsp_size_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);

	return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
}
3487
/*
 * configfs: store the srp_max_rsp_size attribute after range-checking it
 * against [MIN_MAX_RSP_SIZE, MAX_SRPT_RSP_SIZE].
 */
static ssize_t srpt_tpg_attrib_srp_max_rsp_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0) {
		pr_err("kstrtoul() failed with ret: %d\n", ret);
		return -EINVAL;
	}
	if (val > MAX_SRPT_RSP_SIZE) {
		pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
			MAX_SRPT_RSP_SIZE);
		return -EINVAL;
	}
	if (val < MIN_MAX_RSP_SIZE) {
		pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
			MIN_MAX_RSP_SIZE);
		return -EINVAL;
	}
	sport->port_attrib.srp_max_rsp_size = val;

	return count;
}
3515
/* configfs: show the srp_sq_size attribute of a TPG. */
static ssize_t srpt_tpg_attrib_srp_sq_size_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);

	return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
}
3524
/*
 * configfs: store the srp_sq_size attribute after range-checking it against
 * [MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE].
 */
static ssize_t srpt_tpg_attrib_srp_sq_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0) {
		pr_err("kstrtoul() failed with ret: %d\n", ret);
		return -EINVAL;
	}
	if (val > MAX_SRPT_SRQ_SIZE) {
		pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
			MAX_SRPT_SRQ_SIZE);
		return -EINVAL;
	}
	if (val < MIN_SRPT_SRQ_SIZE) {
		pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
			MIN_SRPT_SRQ_SIZE);
		return -EINVAL;
	}
	sport->port_attrib.srp_sq_size = val;

	return count;
}
3552
/* configfs: show whether this port uses a shared receive queue. */
static ssize_t srpt_tpg_attrib_use_srq_show(struct config_item *item,
					    char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);

	return sprintf(page, "%d\n", sport->port_attrib.use_srq);
}
3561
/*
 * configfs: switch SRQ mode for a port. Accepts only 0 or 1. The port is
 * temporarily disabled while the SRQ is (de)allocated, then restored to its
 * previous enabled state. Takes the device mutex before the port mutex.
 */
static ssize_t srpt_tpg_attrib_use_srq_store(struct config_item *item,
					     const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
	struct srpt_device *sdev = sport->sdev;
	unsigned long val;
	bool enabled;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0)
		return ret;
	if (val != !!val)
		return -EINVAL;

	ret = mutex_lock_interruptible(&sdev->sdev_mutex);
	if (ret < 0)
		return ret;
	ret = mutex_lock_interruptible(&sport->mutex);
	if (ret < 0)
		goto unlock_sdev;
	enabled = sport->enabled;
	/* Log out all initiator systems before changing 'use_srq'. */
	srpt_set_enabled(sport, false);
	sport->port_attrib.use_srq = val;
	srpt_use_srq(sdev, sport->port_attrib.use_srq);
	srpt_set_enabled(sport, enabled);
	ret = count;
	mutex_unlock(&sport->mutex);
unlock_sdev:
	mutex_unlock(&sdev->sdev_mutex);

	return ret;
}
3597
/* configfs attribute definitions for the TPG "attrib" group. */
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rdma_size);
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rsp_size);
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_sq_size);
CONFIGFS_ATTR(srpt_tpg_attrib_, use_srq);

static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
	&srpt_tpg_attrib_attr_srp_max_rdma_size,
	&srpt_tpg_attrib_attr_srp_max_rsp_size,
	&srpt_tpg_attrib_attr_srp_sq_size,
	&srpt_tpg_attrib_attr_use_srq,
	NULL,
};
3610
3611static struct rdma_cm_id *srpt_create_rdma_id(struct sockaddr *listen_addr)
3612{
3613 struct rdma_cm_id *rdma_cm_id;
3614 int ret;
3615
3616 rdma_cm_id = rdma_create_id(&init_net, srpt_rdma_cm_handler,
3617 NULL, RDMA_PS_TCP, IB_QPT_RC);
3618 if (IS_ERR(rdma_cm_id)) {
3619 pr_err("RDMA/CM ID creation failed: %ld\n",
3620 PTR_ERR(rdma_cm_id));
3621 goto out;
3622 }
3623
3624 ret = rdma_bind_addr(rdma_cm_id, listen_addr);
3625 if (ret) {
3626 char addr_str[64];
3627
3628 snprintf(addr_str, sizeof(addr_str), "%pISp", listen_addr);
3629 pr_err("Binding RDMA/CM ID to address %s failed: %d\n",
3630 addr_str, ret);
3631 rdma_destroy_id(rdma_cm_id);
3632 rdma_cm_id = ERR_PTR(ret);
3633 goto out;
3634 }
3635
3636 ret = rdma_listen(rdma_cm_id, 128);
3637 if (ret) {
3638 pr_err("rdma_listen() failed: %d\n", ret);
3639 rdma_destroy_id(rdma_cm_id);
3640 rdma_cm_id = ERR_PTR(ret);
3641 }
3642
3643out:
3644 return rdma_cm_id;
3645}
3646
/* configfs: show the RDMA/CM listener port (0 = listener disabled). */
static ssize_t srpt_rdma_cm_port_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", rdma_cm_port);
}
3651
3652static ssize_t srpt_rdma_cm_port_store(struct config_item *item,
3653 const char *page, size_t count)
3654{
3655 struct sockaddr_in addr4 = { .sin_family = AF_INET };
3656 struct sockaddr_in6 addr6 = { .sin6_family = AF_INET6 };
3657 struct rdma_cm_id *new_id = NULL;
3658 u16 val;
3659 int ret;
3660
3661 ret = kstrtou16(page, 0, &val);
3662 if (ret < 0)
3663 return ret;
3664 ret = count;
3665 if (rdma_cm_port == val)
3666 goto out;
3667
3668 if (val) {
3669 addr6.sin6_port = cpu_to_be16(val);
3670 new_id = srpt_create_rdma_id((struct sockaddr *)&addr6);
3671 if (IS_ERR(new_id)) {
3672 addr4.sin_port = cpu_to_be16(val);
3673 new_id = srpt_create_rdma_id((struct sockaddr *)&addr4);
3674 if (IS_ERR(new_id)) {
3675 ret = PTR_ERR(new_id);
3676 goto out;
3677 }
3678 }
3679 }
3680
3681 mutex_lock(&rdma_cm_mutex);
3682 rdma_cm_port = val;
3683 swap(rdma_cm_id, new_id);
3684 mutex_unlock(&rdma_cm_mutex);
3685
3686 if (new_id)
3687 rdma_destroy_id(new_id);
3688 ret = count;
3689out:
3690 return ret;
3691}
3692
/* Driver-level (discovery) configfs attributes. */
CONFIGFS_ATTR(srpt_, rdma_cm_port);

static struct configfs_attribute *srpt_da_attrs[] = {
	&srpt_attr_rdma_cm_port,
	NULL,
};
3699
/* configfs: show whether the port behind this TPG accepts logins. */
static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);

	return snprintf(page, PAGE_SIZE, "%d\n", sport->enabled);
}
3707
/* configfs: enable (1) or disable (0) the port behind this TPG. */
static ssize_t srpt_tpg_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract srpt_tpg_store_enable\n");
		return -EINVAL;
	}

	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
		return -EINVAL;
	}

	mutex_lock(&sport->mutex);
	srpt_set_enabled(sport, tmp);
	mutex_unlock(&sport->mutex);

	return count;
}
3733
/* Base configfs attributes of a TPG directory. */
CONFIGFS_ATTR(srpt_tpg_, enable);

static struct configfs_attribute *srpt_tpg_attrs[] = {
	&srpt_tpg_attr_enable,
	NULL,
};
3740
3741
3742
3743
3744
3745
/**
 * srpt_make_tpg - configfs callback invoked for mkdir(2) in a TPG directory
 * @wwn: Corresponds to $driver/$port.
 * @name: $tpg.
 *
 * Allocates an srpt_tpg, registers it with the TCM core and links it into
 * the per-port-id TPG list. Returns an ERR_PTR() on failure.
 */
static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
					     const char *name)
{
	struct srpt_port_id *sport_id = srpt_wwn_to_sport_id(wwn);
	struct srpt_tpg *stpg;
	int res = -ENOMEM;

	stpg = kzalloc(sizeof(*stpg), GFP_KERNEL);
	if (!stpg)
		return ERR_PTR(res);
	stpg->sport_id = sport_id;
	res = core_tpg_register(wwn, &stpg->tpg, SCSI_PROTOCOL_SRP);
	if (res) {
		kfree(stpg);
		return ERR_PTR(res);
	}

	mutex_lock(&sport_id->mutex);
	list_add_tail(&stpg->entry, &sport_id->tpg_list);
	mutex_unlock(&sport_id->mutex);

	return &stpg->tpg;
}
3769
3770
3771
3772
3773
/**
 * srpt_drop_tpg - configfs callback invoked for rmdir(2) in a TPG directory
 * @tpg: Target portal group to deregister.
 *
 * Unlinks the TPG, disables the port and releases the TPG resources.
 */
static void srpt_drop_tpg(struct se_portal_group *tpg)
{
	struct srpt_tpg *stpg = container_of(tpg, typeof(*stpg), tpg);
	struct srpt_port_id *sport_id = stpg->sport_id;
	struct srpt_port *sport = srpt_tpg_to_sport(tpg);

	mutex_lock(&sport_id->mutex);
	list_del(&stpg->entry);
	mutex_unlock(&sport_id->mutex);

	sport->enabled = false;
	core_tpg_deregister(tpg);
	kfree(stpg);
}
3788
3789
3790
3791
3792
3793
3794
/*
 * configfs callback invoked for mkdir(2) in a $driver/$port directory; only
 * already-existing port WWNs may be selected.
 */
static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
				      struct config_group *group,
				      const char *name)
{
	return srpt_lookup_wwn(name) ? : ERR_PTR(-EINVAL);
}
3801
3802
3803
3804
3805
/*
 * configfs callback invoked for rmdir(2) in a $driver/$port directory; the
 * WWN is owned by the port, so there is nothing to free here.
 */
static void srpt_drop_tport(struct se_wwn *wwn)
{
}
3809
/* configfs: version attribute; intentionally reports nothing but a newline. */
static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "\n");
}
3814
/* Read-only configfs attributes of a WWN directory. */
CONFIGFS_ATTR_RO(srpt_wwn_, version);

static struct configfs_attribute *srpt_wwn_attrs[] = {
	&srpt_wwn_attr_version,
	NULL,
};
3821
/* TCM fabric operations implemented by the srpt driver. */
static const struct target_core_fabric_ops srpt_template = {
	.module				= THIS_MODULE,
	.fabric_name			= "srpt",
	.tpg_get_wwn			= srpt_get_fabric_wwn,
	.tpg_get_tag			= srpt_get_tag,
	.tpg_check_demo_mode		= srpt_check_false,
	.tpg_check_demo_mode_cache	= srpt_check_true,
	.tpg_check_demo_mode_write_protect = srpt_check_true,
	.tpg_check_prod_mode_write_protect = srpt_check_false,
	.tpg_get_inst_index		= srpt_tpg_get_inst_index,
	.release_cmd			= srpt_release_cmd,
	.check_stop_free		= srpt_check_stop_free,
	.close_session			= srpt_close_session,
	.sess_get_index			= srpt_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= srpt_write_pending,
	.set_default_node_attributes	= srpt_set_default_node_attrs,
	.get_cmd_state			= srpt_get_tcm_cmd_state,
	.queue_data_in			= srpt_queue_data_in,
	.queue_status			= srpt_queue_status,
	.queue_tm_rsp			= srpt_queue_tm_rsp,
	.aborted_task			= srpt_aborted_task,
	/*
	 * Setup function pointers for generic logic in
	 * target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= srpt_make_tport,
	.fabric_drop_wwn		= srpt_drop_tport,
	.fabric_make_tpg		= srpt_make_tpg,
	.fabric_drop_tpg		= srpt_drop_tpg,
	.fabric_init_nodeacl		= srpt_init_nodeacl,

	.tfc_discovery_attrs		= srpt_da_attrs,
	.tfc_wwn_attrs			= srpt_wwn_attrs,
	.tfc_tpg_base_attrs		= srpt_tpg_attrs,
	.tfc_tpg_attrib_attrs		= srpt_tpg_attrib_attrs,
};
3859
3860
3861
3862
3863
3864
3865
3866
3867
/**
 * srpt_init_module - kernel module initialization
 *
 * Note: Since ib_register_client() registers callback functions, and since
 * at least one of those callbacks (srpt_add_one()) uses state registered via
 * target_register_template(), the target template is registered first.
 * Module parameters are validated before any registration.
 */
static int __init srpt_init_module(void)
{
	int ret;

	ret = -EINVAL;
	if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
		pr_err("invalid value %d for kernel module parameter srp_max_req_size -- must be at least %d.\n",
		       srp_max_req_size, MIN_MAX_REQ_SIZE);
		goto out;
	}

	if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
	    || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
		pr_err("invalid value %d for kernel module parameter srpt_srq_size -- must be in the range [%d..%d].\n",
		       srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
		goto out;
	}

	ret = target_register_template(&srpt_template);
	if (ret)
		goto out;

	ret = ib_register_client(&srpt_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto out_unregister_target;
	}

	return 0;

out_unregister_target:
	target_unregister_template(&srpt_template);
out:
	return ret;
}
3903
/* Module unload: tear down in the reverse order of srpt_init_module(). */
static void __exit srpt_cleanup_module(void)
{
	if (rdma_cm_id)
		rdma_destroy_id(rdma_cm_id);
	ib_unregister_client(&srpt_client);
	target_unregister_template(&srpt_template);
}
3911
3912module_init(srpt_init_module);
3913module_exit(srpt_cleanup_module);
3914