#include <linux/kmod.h>
#include <linux/moduleparam.h>
#include <linux/ratelimit.h>
#include <scsi/osd_initiator.h>
#include "objlayout.h"

#define NFSDBG_FACILITY NFSDBG_PNFS_LD
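
/*
 * Create an objlayout layout structure for the given inode and return it.
 */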
struct pnfs_layout_hdr *
objlayout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
	struct objlayout *objlay;

	objlay = kzalloc(sizeof(struct objlayout), gfp_flags);
	if (objlay) {
		spin_lock_init(&objlay->lock);
		INIT_LIST_HEAD(&objlay->err_list);
	}
	dprintk("%s: Return %p\n", __func__, objlay);
	return &objlay->pnfs_layout;
}
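
/*
 * Free an objlayout layout structure.
 */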
void
objlayout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct objlayout *objlay = OBJLAYOUT(lo);

	dprintk("%s: objlay %p\n", __func__, objlay);

	WARN_ON(!list_empty(&objlay->err_list));
	kfree(objlay);
}
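
/*
 * Unmarshal the layout returned by LAYOUTGET and allocate a layout
 * segment from it.
 */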
struct pnfs_layout_segment *
objlayout_alloc_lseg(struct pnfs_layout_hdr *pnfslay,
		     struct nfs4_layoutget_res *lgr,
		     gfp_t gfp_flags)
{
	int status = -ENOMEM;
	struct xdr_stream stream;
	struct xdr_buf buf = {
		.pages = lgr->layoutp->pages,
		.page_len = lgr->layoutp->len,
		.buflen = lgr->layoutp->len,
		.len = lgr->layoutp->len,
	};
	struct page *scratch;
	struct pnfs_layout_segment *lseg;

	dprintk("%s: Begin pnfslay %p\n", __func__, pnfslay);

	scratch = alloc_page(gfp_flags);
	if (!scratch)
		goto err_nofree;

	xdr_init_decode(&stream, &buf, NULL);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	status = objio_alloc_lseg(&lseg, pnfslay, &lgr->range, &stream, gfp_flags);
	if (unlikely(status)) {
		dprintk("%s: objio_alloc_lseg Return err %d\n", __func__,
			status);
		goto err;
	}

	__free_page(scratch);

	dprintk("%s: Return %p\n", __func__, lseg);
	return lseg;

err:
	__free_page(scratch);
err_nofree:
	dprintk("%s: Err Return=>%d\n", __func__, status);
	return ERR_PTR(status);
}
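
/*
 * Free a layout segment.
 */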
void
objlayout_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s: freeing layout segment %p\n", __func__, lseg);

	if (unlikely(!lseg))
		return;

	objio_free_lseg(lseg);
}
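
/*
 * I/O Operations
 */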
static inline u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}

static void _fix_verify_io_params(struct pnfs_layout_segment *lseg,
				  struct page ***p_pages, unsigned *p_pgbase,
				  u64 offset, unsigned long count)
{
	u64 lseg_end_offset;

	BUG_ON(offset < lseg->pls_range.offset);
	lseg_end_offset = end_offset(lseg->pls_range.offset,
				     lseg->pls_range.length);
	BUG_ON(offset >= lseg_end_offset);
	WARN_ON(offset + count > lseg_end_offset);

	if (*p_pgbase > PAGE_SIZE) {
		dprintk("%s: pgbase(0x%x) > PAGE_SIZE\n", __func__, *p_pgbase);
		*p_pages += *p_pgbase >> PAGE_SHIFT;
		*p_pgbase &= ~PAGE_MASK;
	}
}
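
/*
 * I/O done common code.
 */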
static void
objlayout_iodone(struct objlayout_io_res *oir)
{
	if (likely(oir->status >= 0)) {
		objio_free_result(oir);
	} else {
		struct objlayout *objlay = oir->objlay;

		spin_lock(&objlay->lock);
		objlay->delta_space_valid = OBJ_DSU_INVALID;
		list_add(&objlay->err_list, &oir->err_list);
		spin_unlock(&objlay->lock);
	}
}
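
/*
 * objlayout_io_set_result - Set an osd_error code on a specific osd comp.
 *
 * The @index component I/O failed (error returned from target). Register
 * the error for later reporting at layout-return.
 */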
void
objlayout_io_set_result(struct objlayout_io_res *oir, unsigned index,
			struct pnfs_osd_objid *pooid, int osd_error,
			u64 offset, u64 length, bool is_write)
{
	struct pnfs_osd_ioerr *ioerr = &oir->ioerrs[index];

	BUG_ON(index >= oir->num_comps);
	if (osd_error) {
		ioerr->oer_component = *pooid;
		ioerr->oer_comp_offset = offset;
		ioerr->oer_comp_length = length;
		ioerr->oer_iswrite = is_write;
		ioerr->oer_errno = osd_error;

		dprintk("%s: err[%d]: errno=%d is_write=%d dev(%llx:%llx) "
			"par=0x%llx obj=0x%llx offset=0x%llx length=0x%llx\n",
			__func__, index, ioerr->oer_errno,
			ioerr->oer_iswrite,
			_DEVID_LO(&ioerr->oer_component.oid_device_id),
			_DEVID_HI(&ioerr->oer_component.oid_device_id),
			ioerr->oer_component.oid_partition_id,
			ioerr->oer_component.oid_object_id,
			ioerr->oer_comp_offset,
			ioerr->oer_comp_length);
	} else {
		/* No error to report for this component */
		ioerr->oer_errno = 0;
	}
}
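
/*
 * Read completion is deferred to the rpc workqueue so that
 * pnfs_ld_read_done() runs in process context rather than directly from
 * the OSD/block-layer completion path.
 */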
static void _rpc_read_complete(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_read_data *rdata;

	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	rdata = container_of(task, struct nfs_read_data, task);

	pnfs_ld_read_done(rdata);
}

void
objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
{
	struct nfs_read_data *rdata = oir->rpcdata;

	oir->status = rdata->task.tk_status = status;
	if (status >= 0)
		rdata->res.count = status;
	else
		rdata->header->pnfs_error = status;
	objlayout_iodone(oir);
	/* must not use oir after this point */

	dprintk("%s: Return status=%zd eof=%d sync=%d\n", __func__,
		status, rdata->res.eof, sync);

	if (sync)
		pnfs_ld_read_done(rdata);
	else {
		INIT_WORK(&rdata->task.u.tk_work, _rpc_read_complete);
		schedule_work(&rdata->task.u.tk_work);
	}
}
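
/*
 * Perform sync or async reads.
 */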
enum pnfs_try_status
objlayout_read_pagelist(struct nfs_read_data *rdata)
{
	struct nfs_pgio_header *hdr = rdata->header;
	struct inode *inode = hdr->inode;
	loff_t offset = rdata->args.offset;
	size_t count = rdata->args.count;
	int err;
	loff_t eof;

	eof = i_size_read(inode);
	if (unlikely(offset + count > eof)) {
		if (offset >= eof) {
			err = 0;
			rdata->res.count = 0;
			rdata->res.eof = 1;
			goto out;
		}
		count = eof - offset;
	}

	rdata->res.eof = (offset + count) >= eof;
	_fix_verify_io_params(hdr->lseg, &rdata->args.pages,
			      &rdata->args.pgbase,
			      rdata->args.offset, rdata->args.count);

	dprintk("%s: inode(%lx) offset 0x%llx count 0x%Zx eof=%d\n",
		__func__, inode->i_ino, offset, count, rdata->res.eof);

	err = objio_read_pagelist(rdata);
 out:
	if (unlikely(err)) {
		hdr->pnfs_error = err;
		dprintk("%s: Returned Error %d\n", __func__, err);
		return PNFS_NOT_ATTEMPTED;
	}
	return PNFS_ATTEMPTED;
}
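
/*
 * Write completion is deferred to the rpc workqueue so that
 * pnfs_ld_write_done() runs in process context rather than directly from
 * the OSD/block-layer completion path.
 */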
static void _rpc_write_complete(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_write_data *wdata;

	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	wdata = container_of(task, struct nfs_write_data, task);

	pnfs_ld_write_done(wdata);
}

void
objlayout_write_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
{
	struct nfs_write_data *wdata = oir->rpcdata;

	oir->status = wdata->task.tk_status = status;
	if (status >= 0) {
		wdata->res.count = status;
		wdata->verf.committed = oir->committed;
	} else {
		wdata->header->pnfs_error = status;
	}
	objlayout_iodone(oir);
	/* must not use oir after this point */

	dprintk("%s: Return status %zd committed %d sync=%d\n", __func__,
		status, wdata->verf.committed, sync);

	if (sync)
		pnfs_ld_write_done(wdata);
	else {
		INIT_WORK(&wdata->task.u.tk_work, _rpc_write_complete);
		schedule_work(&wdata->task.u.tk_work);
	}
}
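
/*
 * Perform sync or async writes.
 */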
enum pnfs_try_status
objlayout_write_pagelist(struct nfs_write_data *wdata,
			 int how)
{
	struct nfs_pgio_header *hdr = wdata->header;
	int err;

	_fix_verify_io_params(hdr->lseg, &wdata->args.pages,
			      &wdata->args.pgbase,
			      wdata->args.offset, wdata->args.count);

	err = objio_write_pagelist(wdata, how);
	if (unlikely(err)) {
		hdr->pnfs_error = err;
		dprintk("%s: Returned Error %d\n", __func__, err);
		return PNFS_NOT_ATTEMPTED;
	}
	return PNFS_ATTEMPTED;
}
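
/*
 * Encode the layoutupdate part of LAYOUTCOMMIT: the delta in space used
 * and whether any I/O errors are pending for this layout.
 */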
void
objlayout_encode_layoutcommit(struct pnfs_layout_hdr *pnfslay,
			      struct xdr_stream *xdr,
			      const struct nfs4_layoutcommit_args *args)
{
	struct objlayout *objlay = OBJLAYOUT(pnfslay);
	struct pnfs_osd_layoutupdate lou;
	__be32 *start;

	dprintk("%s: Begin\n", __func__);

	spin_lock(&objlay->lock);
	lou.dsu_valid = (objlay->delta_space_valid == OBJ_DSU_VALID);
	lou.dsu_delta = objlay->delta_space_used;
	objlay->delta_space_used = 0;
	objlay->delta_space_valid = OBJ_DSU_INIT;
	lou.olu_ioerr_flag = !list_empty(&objlay->err_list);
	spin_unlock(&objlay->lock);

	start = xdr_reserve_space(xdr, 4);

	BUG_ON(pnfs_osd_xdr_encode_layoutupdate(xdr, &lou));

	*start = cpu_to_be32((xdr->p - start - 1) * 4);

	dprintk("%s: Return delta_space_used %lld err %d\n", __func__,
		lou.dsu_delta, lou.olu_ioerr_flag);
}
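
/*
 * Rank osd error codes so that the most severe error wins when component
 * errors are merged.
 */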
static int
err_prio(u32 oer_errno)
{
	switch (oer_errno) {
	case 0:
		return 0;

	case PNFS_OSD_ERR_RESOURCE:
		return OSD_ERR_PRI_RESOURCE;
	case PNFS_OSD_ERR_BAD_CRED:
		return OSD_ERR_PRI_BAD_CRED;
	case PNFS_OSD_ERR_NO_ACCESS:
		return OSD_ERR_PRI_NO_ACCESS;
	case PNFS_OSD_ERR_UNREACHABLE:
		return OSD_ERR_PRI_UNREACHABLE;
	case PNFS_OSD_ERR_NOT_FOUND:
		return OSD_ERR_PRI_NOT_FOUND;
	case PNFS_OSD_ERR_NO_SPACE:
		return OSD_ERR_PRI_NO_SPACE;
	default:
		WARN_ON(1);
		/* fall through */
	case PNFS_OSD_ERR_EIO:
		return OSD_ERR_PRI_EIO;
	}
}
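
/*
 * Merge two component errors into a single accumulated error covering both
 * byte ranges; write errors and higher-priority errnos take precedence.
 */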
static void
merge_ioerr(struct pnfs_osd_ioerr *dest_err,
	    const struct pnfs_osd_ioerr *src_err)
{
	u64 dest_end, src_end;

	if (!dest_err->oer_errno) {
		*dest_err = *src_err;
		/* the accumulated device id must stay blank */
		memset(&dest_err->oer_component.oid_device_id, 0,
		       sizeof(dest_err->oer_component.oid_device_id));

		return;
	}

	if (dest_err->oer_component.oid_partition_id !=
				src_err->oer_component.oid_partition_id)
		dest_err->oer_component.oid_partition_id = 0;

	if (dest_err->oer_component.oid_object_id !=
				src_err->oer_component.oid_object_id)
		dest_err->oer_component.oid_object_id = 0;

	if (dest_err->oer_comp_offset > src_err->oer_comp_offset)
		dest_err->oer_comp_offset = src_err->oer_comp_offset;

	dest_end = end_offset(dest_err->oer_comp_offset,
			      dest_err->oer_comp_length);
	src_end = end_offset(src_err->oer_comp_offset,
			     src_err->oer_comp_length);
	if (dest_end < src_end)
		dest_end = src_end;

	dest_err->oer_comp_length = dest_end - dest_err->oer_comp_offset;

	if ((src_err->oer_iswrite == dest_err->oer_iswrite) &&
	    (err_prio(src_err->oer_errno) > err_prio(dest_err->oer_errno))) {
		dest_err->oer_errno = src_err->oer_errno;
	} else if (src_err->oer_iswrite) {
		dest_err->oer_iswrite = true;
		dest_err->oer_errno = src_err->oer_errno;
	}
}
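
/*
 * Fold every pending component error into one accumulated error and encode
 * it into the last reserved slot.
 */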
static void
encode_accumulated_error(struct objlayout *objlay, __be32 *p)
{
	struct objlayout_io_res *oir, *tmp;
	struct pnfs_osd_ioerr accumulated_err = {.oer_errno = 0};

	list_for_each_entry_safe(oir, tmp, &objlay->err_list, err_list) {
		unsigned i;

		for (i = 0; i < oir->num_comps; i++) {
			struct pnfs_osd_ioerr *ioerr = &oir->ioerrs[i];

			if (!ioerr->oer_errno)
				continue;

			printk(KERN_ERR "NFS: %s: err[%d]: errno=%d "
				"is_write=%d dev(%llx:%llx) par=0x%llx "
				"obj=0x%llx offset=0x%llx length=0x%llx\n",
				__func__, i, ioerr->oer_errno,
				ioerr->oer_iswrite,
				_DEVID_LO(&ioerr->oer_component.oid_device_id),
				_DEVID_HI(&ioerr->oer_component.oid_device_id),
				ioerr->oer_component.oid_partition_id,
				ioerr->oer_component.oid_object_id,
				ioerr->oer_comp_offset,
				ioerr->oer_comp_length);

			merge_ioerr(&accumulated_err, ioerr);
		}
		list_del(&oir->err_list);
		objio_free_result(oir);
	}

	pnfs_osd_xdr_encode_ioerr(p, &accumulated_err);
}
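
/*
 * Encode the pending io error list for LAYOUTRETURN. If there is not
 * enough XDR space for every error, the remaining errors are merged into a
 * single accumulated error encoded in the last reserved slot.
 */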
void
objlayout_encode_layoutreturn(struct pnfs_layout_hdr *pnfslay,
			      struct xdr_stream *xdr,
			      const struct nfs4_layoutreturn_args *args)
{
	struct objlayout *objlay = OBJLAYOUT(pnfslay);
	struct objlayout_io_res *oir, *tmp;
	__be32 *start;

	dprintk("%s: Begin\n", __func__);
	start = xdr_reserve_space(xdr, 4);
	BUG_ON(!start);

	spin_lock(&objlay->lock);

	list_for_each_entry_safe(oir, tmp, &objlay->err_list, err_list) {
		__be32 *last_xdr = NULL, *p;
		unsigned i;
		int res = 0;

		for (i = 0; i < oir->num_comps; i++) {
			struct pnfs_osd_ioerr *ioerr = &oir->ioerrs[i];

			if (!ioerr->oer_errno)
				continue;

			dprintk("%s: err[%d]: errno=%d is_write=%d "
				"dev(%llx:%llx) par=0x%llx obj=0x%llx "
				"offset=0x%llx length=0x%llx\n",
				__func__, i, ioerr->oer_errno,
				ioerr->oer_iswrite,
				_DEVID_LO(&ioerr->oer_component.oid_device_id),
				_DEVID_HI(&ioerr->oer_component.oid_device_id),
				ioerr->oer_component.oid_partition_id,
				ioerr->oer_component.oid_object_id,
				ioerr->oer_comp_offset,
				ioerr->oer_comp_length);

			p = pnfs_osd_xdr_ioerr_reserve_space(xdr);
			if (unlikely(!p)) {
				res = -E2BIG;
				break; /* fall back to accumulated error */
			}

			last_xdr = p;
			pnfs_osd_xdr_encode_ioerr(p, &oir->ioerrs[i]);
		}

		if (unlikely(res)) {
			/* there was space for at least one error descriptor */
			BUG_ON(!last_xdr);

			/* There are more errors than we have space to encode.
			 * Reuse the last available slot to report the union
			 * of all the remaining errors.
			 */
			encode_accumulated_error(objlay, last_xdr);
			goto loop_done;
		}
		list_del(&oir->err_list);
		objio_free_result(oir);
	}
loop_done:
	spin_unlock(&objlay->lock);

	*start = cpu_to_be32((xdr->p - start - 1) * 4);
	dprintk("%s: Return\n", __func__);
}
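
/*
 * Get Device Info API for io engines
 */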
struct objlayout_deviceinfo {
	struct page *page;
	struct pnfs_osd_deviceaddr da; /* must be last */
};
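
/*
 * Issue GETDEVICEINFO for @d_id, decode the reply into a
 * pnfs_osd_deviceaddr and return it. The caller must eventually release it
 * with objlayout_put_deviceinfo().
 */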
int objlayout_get_deviceinfo(struct pnfs_layout_hdr *pnfslay,
	struct nfs4_deviceid *d_id, struct pnfs_osd_deviceaddr **deviceaddr,
	gfp_t gfp_flags)
{
	struct objlayout_deviceinfo *odi;
	struct pnfs_device pd;
	struct page *page, **pages;
	u32 *p;
	int err;

	page = alloc_page(gfp_flags);
	if (!page)
		return -ENOMEM;

	pages = &page;
	pd.pages = pages;

	memcpy(&pd.dev_id, d_id, sizeof(*d_id));
	pd.layout_type = LAYOUT_OSD2_OBJECTS;
	pd.pages = &page;
	pd.pgbase = 0;
	pd.pglen = PAGE_SIZE;
	pd.mincount = 0;

	err = nfs4_proc_getdeviceinfo(NFS_SERVER(pnfslay->plh_inode), &pd);
	dprintk("%s nfs_getdeviceinfo returned %d\n", __func__, err);
	if (err)
		goto err_out;

	p = page_address(page);
	odi = kzalloc(sizeof(*odi), gfp_flags);
	if (!odi) {
		err = -ENOMEM;
		goto err_out;
	}
	pnfs_osd_xdr_decode_deviceaddr(&odi->da, p);
	odi->page = page;
	*deviceaddr = &odi->da;
	return 0;

err_out:
	__free_page(page);
	return err;
}
void objlayout_put_deviceinfo(struct pnfs_osd_deviceaddr *deviceaddr)
{
	struct objlayout_deviceinfo *odi = container_of(deviceaddr,
						struct objlayout_deviceinfo,
						da);

	__free_page(odi->page);
	kfree(odi);
}
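
/*
 * OSD autologin: upcall to a userspace helper that logs in to the OSD
 * target described by a device address.
 */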
enum {
	OBJLAYOUT_MAX_URI_LEN = 256, OBJLAYOUT_MAX_OSDNAME_LEN = 64,
	OBJLAYOUT_MAX_SYSID_HEX_LEN = OSD_SYSTEMID_LEN * 2 + 1,
	OSD_LOGIN_UPCALL_PATHLEN = 256
};

static char osd_login_prog[OSD_LOGIN_UPCALL_PATHLEN] = "/sbin/osd_login";

module_param_string(osd_login_prog, osd_login_prog, sizeof(osd_login_prog),
		    0600);
MODULE_PARM_DESC(osd_login_prog, "Path to the osd_login upcall program");

struct __auto_login {
	char uri[OBJLAYOUT_MAX_URI_LEN];
	char osdname[OBJLAYOUT_MAX_OSDNAME_LEN];
	char systemid_hex[OBJLAYOUT_MAX_SYSID_HEX_LEN];
};
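
/*
 * Invoke the osd_login helper with the target URI, OSD name and system id.
 * The upcall is disabled if the helper is missing or not executable.
 */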
static int __objlayout_upcall(struct __auto_login *login)
{
	static char *envp[] = { "HOME=/",
		"TERM=linux",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};
	char *argv[8];
	int ret;

	if (unlikely(!osd_login_prog[0])) {
		dprintk("%s: osd_login_prog is disabled\n", __func__);
		return -EACCES;
	}

	dprintk("%s uri: %s\n", __func__, login->uri);
	dprintk("%s osdname %s\n", __func__, login->osdname);
	dprintk("%s systemid_hex %s\n", __func__, login->systemid_hex);

	argv[0] = (char *)osd_login_prog;
	argv[1] = "-u";
	argv[2] = login->uri;
	argv[3] = "-o";
	argv[4] = login->osdname;
	argv[5] = "-s";
	argv[6] = login->systemid_hex;
	argv[7] = NULL;

	ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
	/*
	 * Disable the upcall mechanism on ENOENT or EACCES. The admin can
	 * re-enable it on the fly by setting the
	 * objlayoutdriver.osd_login_prog module parameter once the problem
	 * has been fixed.
	 */
	if (ret == -ENOENT || ret == -EACCES) {
		printk(KERN_ERR "PNFS-OBJ: %s was not found please set "
			"objlayoutdriver.osd_login_prog kernel parameter!\n",
			osd_login_prog);
		osd_login_prog[0] = '\0';
	}
	dprintk("%s %s return value: %d\n", __func__, osd_login_prog, ret);

	return ret;
}
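
/*
 * Copy an nfs4_string into a pre-zeroed buffer, truncating it if necessary
 * so that the result stays NUL-terminated.
 */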
static void __copy_nfsS_and_zero_terminate(struct nfs4_string s,
					   char *dest, int max_len,
					   const char *var_name)
{
	if (!s.len)
		return;

	if (s.len >= max_len) {
		pr_warn_ratelimited(
			"objlayout_autologin: %s: s.len(%d) >= max_len(%d)",
			var_name, s.len, max_len);
		s.len = max_len - 1; /* leave space for the NUL terminator */
	}

	memcpy(dest, s.data, s.len);
}
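
/*
 * Render a binary osd systemid into a pre-zeroed hex string buffer.
 */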
static void _sysid_2_hex(struct nfs4_string s,
			 char sysid[OBJLAYOUT_MAX_SYSID_HEX_LEN])
{
	int i;
	char *cur;

	if (!s.len)
		return;

	if (s.len != OSD_SYSTEMID_LEN) {
		pr_warn_ratelimited(
		    "objlayout_autologin: systemid_len(%d) != OSD_SYSTEMID_LEN",
		    s.len);
		if (s.len > OSD_SYSTEMID_LEN)
			s.len = OSD_SYSTEMID_LEN;
	}

	cur = sysid;
	for (i = 0; i < s.len; i++)
		cur = hex_byte_pack(cur, s.data[i]);
}
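
/*
 * Perform the osd_login upcall for the OSD described by @deviceaddr.
 */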
int objlayout_autologin(struct pnfs_osd_deviceaddr *deviceaddr)
{
	int rc;
	struct __auto_login login;

	if (!deviceaddr->oda_targetaddr.ota_netaddr.r_addr.len)
		return -ENODEV;

	memset(&login, 0, sizeof(login));
	__copy_nfsS_and_zero_terminate(
		deviceaddr->oda_targetaddr.ota_netaddr.r_addr,
		login.uri, sizeof(login.uri), "URI");

	__copy_nfsS_and_zero_terminate(
		deviceaddr->oda_osdname,
		login.osdname, sizeof(login.osdname), "OSDNAME");

	_sysid_2_hex(deviceaddr->oda_systemid, login.systemid_hex);

	rc = __objlayout_upcall(&login);
	if (rc > 0) /* the helper returns positive values on failure */
		rc = -ENODEV;

	return rc;
}