#define DEBUG_SUBSYSTEM S_RPC

#include "../../include/linux/libcfs/libcfs.h"
# ifdef __mips64__
#  include <linux/kernel.h>
# endif

#include "../include/obd_class.h"
#include "../include/lustre_net.h"
#include "../include/lustre_sec.h"
#include "ptlrpc_internal.h"

/* The single event queue that receives completion events for all ptlrpc
 * LNet traffic; see ptlrpc_ni_init() below */
lnet_handle_eq_t ptlrpc_eq_h;
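/*
 * Client's outgoing request callback
 */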
void request_out_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	struct ptlrpc_request *req = cbid->cbid_arg;

	LASSERT(ev->type == LNET_EVENT_SEND ||
		ev->type == LNET_EVENT_UNLINK);
	LASSERT(ev->unlinked);

	DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

	sptlrpc_request_out_callback(req);
	spin_lock(&req->rq_lock);
	req->rq_real_sent = get_seconds();
	if (ev->unlinked)
		req->rq_req_unlink = 0;

	if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
		/* Failed send: make it seem like the reply timed out, just
		 * like failing sends in client.c do currently */
		req->rq_net_err = 1;
		ptlrpc_client_wake_req(req);
	}
	spin_unlock(&req->rq_lock);

	ptlrpc_req_finished(req);
}
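/*
 * Client's incoming reply callback
 */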
void reply_in_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	struct ptlrpc_request *req = cbid->cbid_arg;

	DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

	LASSERT(ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
	LASSERT(ev->md.start == req->rq_repbuf);
	LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len);
	/* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
	 * for adaptive timeouts' early reply */
	LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);

	spin_lock(&req->rq_lock);

	req->rq_receiving_reply = 0;
	req->rq_early = 0;
	if (ev->unlinked)
		req->rq_reply_unlink = 0;

	if (ev->status)
		goto out_wake;

	if (ev->type == LNET_EVENT_UNLINK) {
		LASSERT(ev->unlinked);
		DEBUG_REQ(D_NET, req, "unlink");
		goto out_wake;
	}

	if (ev->mlength < ev->rlength) {
		CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
		       req->rq_replen, ev->rlength, ev->offset);
		req->rq_reply_truncate = 1;
		req->rq_replied = 1;
		req->rq_status = -EOVERFLOW;
		req->rq_nob_received = ev->rlength + ev->offset;
		goto out_wake;
	}

	if ((ev->offset == 0) &&
	    ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) {
		/* Early reply */
		DEBUG_REQ(D_ADAPTTO, req,
			  "Early reply received: mlen=%u offset=%d replen=%d replied=%d unlinked=%d",
			  ev->mlength, ev->offset,
			  req->rq_replen, req->rq_replied, ev->unlinked);

		req->rq_early_count++;	/* number received, client side */

		if (req->rq_replied)	/* already got the real reply */
			goto out_wake;

		req->rq_early = 1;
		req->rq_reply_off = ev->offset;
		req->rq_nob_received = ev->mlength;
		/* And we're still receiving */
		req->rq_receiving_reply = 1;
	} else {
		/* Real reply */
		req->rq_rep_swab_mask = 0;
		req->rq_replied = 1;
		/* Got reply, no resend required */
		req->rq_resend = 0;
		req->rq_reply_off = ev->offset;
		req->rq_nob_received = ev->mlength;
		/* LNetMDUnlink can't be called under the LNET_LOCK,
		 * so we must unlink in ptlrpc_unregister_reply */
		DEBUG_REQ(D_INFO, req,
			  "reply in flags=%x mlen=%u offset=%d replen=%d",
			  lustre_msg_get_flags(req->rq_reqmsg),
			  ev->mlength, ev->offset, req->rq_replen);
	}

	req->rq_import->imp_last_reply_time = get_seconds();

out_wake:
	/* NB don't unlock till after wakeup; req can disappear under us
	 * since we don't have our own ref */
	ptlrpc_client_wake_req(req);
	spin_unlock(&req->rq_lock);
}
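/*
 * Client's bulk has been written/read
 */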
void client_bulk_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
	struct ptlrpc_request *req;

	LASSERT((desc->bd_type == BULK_PUT_SINK &&
		 ev->type == LNET_EVENT_PUT) ||
		(desc->bd_type == BULK_GET_SOURCE &&
		 ev->type == LNET_EVENT_GET) ||
		ev->type == LNET_EVENT_UNLINK);
	LASSERT(ev->unlinked);

	if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE))
		ev->status = -EIO;

	if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2,
				 CFS_FAIL_ONCE))
		ev->status = -EIO;

	CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
	       "event type %d, status %d, desc %p\n",
	       ev->type, ev->status, desc);

	spin_lock(&desc->bd_lock);
	req = desc->bd_req;
	LASSERT(desc->bd_md_count > 0);
	desc->bd_md_count--;

	if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) {
		desc->bd_nob_transferred += ev->mlength;
		desc->bd_sender = ev->sender;
	} else {
		/* start reconnect and resend if network error hit */
		spin_lock(&req->rq_lock);
		req->rq_net_err = 1;
		spin_unlock(&req->rq_lock);
	}

	if (ev->status != 0)
		desc->bd_failure = 1;

	/* NB don't unlock till after wakeup; desc can disappear under us
	 * otherwise */
	if (desc->bd_md_count == 0)
		ptlrpc_client_wake_req(desc->bd_req);

	spin_unlock(&desc->bd_lock);
}
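/*
 * Request history sequence numbers must encode arrival order without
 * taking a global lock, so each service partition (CPT) keeps its own
 * sequence, packed from high to low bits as
 *
 *	arrival seconds (32 bits) | usec/16 (16 bits) | local seq# | cpt id
 *
 * The local sequence counter occupies the bits between the usec field and
 * the low REQS_CPT_BITS(svcpt) bits holding the CPT number, so sequence
 * numbers from different CPTs still sort in approximate arrival order.
 */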
#define REQS_CPT_BITS(svcpt)	((svcpt)->scp_service->srv_cpt_bits)

#define REQS_SEC_SHIFT		32
#define REQS_USEC_SHIFT		16
#define REQS_SEQ_SHIFT(svcpt)	REQS_CPT_BITS(svcpt)

static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
				   struct ptlrpc_request *req)
{
	__u64 sec = req->rq_arrival_time.tv_sec;
	__u32 usec = req->rq_arrival_time.tv_usec >> 4; /* usec / 16 */
	__u64 new_seq;

	/* set sequence ID for request and add it to the history list;
	 * the caller must hold svcpt::scp_lock */

	new_seq = (sec << REQS_SEC_SHIFT) |
		  (usec << REQS_USEC_SHIFT) |
		  (svcpt->scp_cpt < 0 ? 0 : svcpt->scp_cpt);

	if (new_seq > svcpt->scp_hist_seq) {
		/* This handles the initial case of scp_hist_seq == 0 or
		 * the arrival time moved us into a new usec bucket */
		svcpt->scp_hist_seq = new_seq;
	} else {
		LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT);
		/* Collision within the same usec bucket: bump the local
		 * sequence counter, which sits just above the CPT bits.
		 * If it overflows into the usec field we simply borrow
		 * from a slightly future time slot. */
		svcpt->scp_hist_seq += (1U << REQS_SEQ_SHIFT(svcpt));
		new_seq = svcpt->scp_hist_seq;
	}

	req->rq_history_seq = new_seq;

	list_add_tail(&req->rq_history_list, &svcpt->scp_hist_reqs);
}
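/*
 * Server's incoming request callback
 */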
void request_in_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
	struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
	struct ptlrpc_service *service = svcpt->scp_service;
	struct ptlrpc_request *req;

	LASSERT(ev->type == LNET_EVENT_PUT ||
		ev->type == LNET_EVENT_UNLINK);
	LASSERT((char *)ev->md.start >= rqbd->rqbd_buffer);
	LASSERT((char *)ev->md.start + ev->offset + ev->mlength <=
		rqbd->rqbd_buffer + service->srv_buf_size);

	CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
	       "event type %d, status %d, service %s\n",
	       ev->type, ev->status, service->srv_name);

	if (ev->unlinked) {
		/* If this is the last request message to fit in the
		 * request buffer we can use the request object embedded in
		 * rqbd.  Note that if we failed to allocate a request,
		 * we'd have to re-post the rqbd, which we can't do in this
		 * context. */
		req = &rqbd->rqbd_req;
		memset(req, 0, sizeof(*req));
	} else {
		LASSERT(ev->type == LNET_EVENT_PUT);
		if (ev->status != 0) {
			/* We moaned above already... */
			return;
		}
		req = ptlrpc_request_cache_alloc(GFP_ATOMIC);
		if (req == NULL) {
			CERROR("Can't allocate incoming request descriptor: Dropping %s RPC from %s\n",
			       service->srv_name,
			       libcfs_id2str(ev->initiator));
			return;
		}
	}

	/* NB: we rely on req being zeroed here, so only fields that must be
	 * non-zero are set below; rq_reqdata_len stays 0 unless this was a
	 * successful receive. */
	req->rq_xid = ev->match_bits;
	req->rq_reqbuf = ev->md.start + ev->offset;
	if (ev->type == LNET_EVENT_PUT && ev->status == 0)
		req->rq_reqdata_len = ev->mlength;
	do_gettimeofday(&req->rq_arrival_time);
	req->rq_peer = ev->initiator;
	req->rq_self = ev->target.nid;
	req->rq_rqbd = rqbd;
	req->rq_phase = RQ_PHASE_NEW;
	spin_lock_init(&req->rq_lock);
	INIT_LIST_HEAD(&req->rq_timed_list);
	INIT_LIST_HEAD(&req->rq_exp_list);
	atomic_set(&req->rq_refcount, 1);
	if (ev->type == LNET_EVENT_PUT)
		CDEBUG(D_INFO, "incoming req@%p x%llu msgsize %u\n",
		       req, req->rq_xid, ev->mlength);

	CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));

	spin_lock(&svcpt->scp_lock);

	ptlrpc_req_add_history(svcpt, req);

	if (ev->unlinked) {
		svcpt->scp_nrqbds_posted--;
		CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
		       svcpt->scp_nrqbds_posted);

		/* Normally, don't complain about 0 posted buffers; LNET
		 * won't drop requests because we set the portal lazy! */
		if (test_req_buffer_pressure &&
		    ev->type != LNET_EVENT_UNLINK &&
		    svcpt->scp_nrqbds_posted == 0)
			CWARN("All %s request buffers busy\n",
			      service->srv_name);

		/* req takes over the network's ref on rqbd */
	} else {
		/* req takes a ref on rqbd */
		rqbd->rqbd_refcount++;
	}

	list_add_tail(&req->rq_list, &svcpt->scp_req_incoming);
	svcpt->scp_nreqs_incoming++;

	/* NB everything can disappear under us once the request
	 * has been queued and we unlock, so do the wake now... */
	wake_up(&svcpt->scp_waitq);

	spin_unlock(&svcpt->scp_lock);
}
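/*
 * Server's outgoing reply callback
 */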
void reply_out_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	struct ptlrpc_reply_state *rs = cbid->cbid_arg;
	struct ptlrpc_service_part *svcpt = rs->rs_svcpt;

	LASSERT(ev->type == LNET_EVENT_SEND ||
		ev->type == LNET_EVENT_ACK ||
		ev->type == LNET_EVENT_UNLINK);

	if (!rs->rs_difficult) {
		/* 'Easy' replies have no further processing, so drop the
		 * net's ref on 'rs' */
		LASSERT(ev->unlinked);
		ptlrpc_rs_decref(rs);
		return;
	}

	LASSERT(rs->rs_on_net);

	if (ev->unlinked) {
		/* Last network callback.  The net's ref on 'rs' stays put
		 * until ptlrpc_handle_rs() is done with it */
		spin_lock(&svcpt->scp_rep_lock);
		spin_lock(&rs->rs_lock);

		rs->rs_on_net = 0;
		if (!rs->rs_no_ack ||
		    rs->rs_transno <=
		    rs->rs_export->exp_obd->obd_last_committed)
			ptlrpc_schedule_difficult_reply(rs);

		spin_unlock(&rs->rs_lock);
		spin_unlock(&svcpt->scp_rep_lock);
	}
}
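/*
 * Master callback: dispatch an LNet event to the specific handler
 * recorded in the MD's ptlrpc_cb_id.
 */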
static void ptlrpc_master_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	void (*callback)(lnet_event_t *ev) = cbid->cbid_fn;

	/* Honestly, it's best to find out early */
	LASSERT(cbid->cbid_arg != LP_POISON);
	LASSERT(callback == request_out_callback ||
		callback == reply_in_callback ||
		callback == client_bulk_callback ||
		callback == request_in_callback ||
		callback == reply_out_callback);

	callback(ev);
}

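/*
 * Map a Lustre UUID to the closest reachable LNet peer; on success fill
 * in the peer's process ID and the local NID to send from.
 */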
int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
			lnet_process_id_t *peer, lnet_nid_t *self)
{
	int best_dist = 0;
	__u32 best_order = 0;
	int count = 0;
	int rc = -ENOENT;
	int portals_compatibility;
	int dist;
	__u32 order;
	lnet_nid_t dst_nid;
	lnet_nid_t src_nid;

	portals_compatibility = LNetCtl(IOC_LIBCFS_PORTALS_COMPATIBILITY, NULL);

	peer->pid = LUSTRE_SRV_LNET_PID;

	/* Choose the matching UUID that's closest */
	while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
		dist = LNetDist(dst_nid, &src_nid, &order);
		if (dist < 0)
			continue;

		if (dist == 0) {	/* local! use loopback LND */
			peer->nid = *self = LNET_MKNID(LNET_MKNET(LOLND, 0), 0);
			rc = 0;
			break;
		}

		if (rc < 0 ||
		    dist < best_dist ||
		    (dist == best_dist && order < best_order)) {
			best_dist = dist;
			best_order = order;

			if (portals_compatibility > 1) {
				/* Strong portals compatibility: zero the
				 * NID's NET so that, when reading new config
				 * logs or getting configured by (new) lconf,
				 * we can still talk to old servers. */
				dst_nid = LNET_MKNID(0, LNET_NIDADDR(dst_nid));
				src_nid = LNET_MKNID(0, LNET_NIDADDR(src_nid));
			}
			peer->nid = dst_nid;
			*self = src_nid;
			rc = 0;
		}
	}

	CDEBUG(D_NET, "%s->%s\n", uuid->uuid, libcfs_id2str(*peer));
	return rc;
}

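/*
 * Release the ptlrpc event queue and shut down the LNet interface.
 */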
void ptlrpc_ni_fini(void)
{
	wait_queue_head_t waitq;
	struct l_wait_info lwi;
	int rc;
	int retries;

	/* Wait for the event queue to become idle since there may still be
	 * messages in flight with pending events (i.e. the fire-and-forget
	 * ACK sending etc.) */

	for (retries = 0;; retries++) {
		rc = LNetEQFree(ptlrpc_eq_h);
		switch (rc) {
		default:
			LBUG();

		case 0:
			LNetNIFini();
			return;

		case -EBUSY:
			if (retries != 0)
				CWARN("Event queue still busy\n");

			/* Wait for a bit */
			init_waitqueue_head(&waitq);
			lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
			l_wait_event(waitq, 0, &lwi);
			break;
		}
	}
	/* notreached */
}

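/* ptlrpc always runs with the well-known Lustre server LNet PID */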
lnet_pid_t ptl_get_pid(void)
{
	return LUSTRE_SRV_LNET_PID;
}

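/*
 * Initialize the LNet interface and allocate the single event queue used
 * by all ptlrpc communications.
 */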
int ptlrpc_ni_init(void)
{
	int rc;
	lnet_pid_t pid;

	pid = ptl_get_pid();
	CDEBUG(D_NET, "My pid is: %x\n", pid);

	/* We're not passing any limits yet... */
	rc = LNetNIInit(pid);
	if (rc < 0) {
		CDEBUG(D_NET, "Can't init network interface: %d\n", rc);
		return -ENOENT;
	}

	/* All ptlrpc traffic shares one event queue; request zero queued
	 * events since ptlrpc_master_callback consumes each event as it
	 * is delivered. */
	rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h);
	if (rc == 0)
		return 0;

	CERROR("Failed to allocate event queue: %d\n", rc);
	LNetNIFini();

	return -ENOMEM;
}
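/*
 * Module-level startup: bring up LNet and take a reference on the
 * ptlrpcd daemons.
 */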
int ptlrpc_init_portals(void)
{
	int rc = ptlrpc_ni_init();

	if (rc != 0) {
		CERROR("network initialisation failed\n");
		return -EIO;
	}
	rc = ptlrpcd_addref();
	if (rc == 0)
		return 0;

	CERROR("rpcd initialisation failed\n");
	ptlrpc_ni_fini();
	return rc;
}

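/*
 * Module-level teardown: drop the ptlrpcd reference and shut down LNet.
 */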
void ptlrpc_exit_portals(void)
{
	ptlrpcd_decref();
	ptlrpc_ni_fini();
}