1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include <linux/compat.h>
19#include <linux/ioctl.h>
20#include <linux/mount.h>
21#include <asm/uaccess.h>
22#include "xfs.h"
23#include "xfs_fs.h"
24#include "xfs_bit.h"
25#include "xfs_log.h"
26#include "xfs_inum.h"
27#include "xfs_trans.h"
28#include "xfs_sb.h"
29#include "xfs_ag.h"
30#include "xfs_dir2.h"
31#include "xfs_dmapi.h"
32#include "xfs_mount.h"
33#include "xfs_bmap_btree.h"
34#include "xfs_attr_sf.h"
35#include "xfs_dir2_sf.h"
36#include "xfs_vnode.h"
37#include "xfs_dinode.h"
38#include "xfs_inode.h"
39#include "xfs_itable.h"
40#include "xfs_error.h"
41#include "xfs_dfrag.h"
42#include "xfs_vnodeops.h"
43#include "xfs_fsops.h"
44#include "xfs_alloc.h"
45#include "xfs_rtalloc.h"
46#include "xfs_attr.h"
47#include "xfs_ioctl.h"
48#include "xfs_ioctl32.h"
49
/*
 * Rebuild an ioctl command number using the native (64-bit) size of "type",
 * while keeping the direction, type class and sequence number of the
 * original 32-bit command.  Used to forward compat commands whose only
 * difference is the encoded structure size.
 */
#define _NATIVE_IOC(cmd, type) \
	_IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))
52
53#ifdef BROKEN_X86_ALIGNMENT
54STATIC int
55xfs_compat_flock64_copyin(
56 xfs_flock64_t *bf,
57 compat_xfs_flock64_t __user *arg32)
58{
59 if (get_user(bf->l_type, &arg32->l_type) ||
60 get_user(bf->l_whence, &arg32->l_whence) ||
61 get_user(bf->l_start, &arg32->l_start) ||
62 get_user(bf->l_len, &arg32->l_len) ||
63 get_user(bf->l_sysid, &arg32->l_sysid) ||
64 get_user(bf->l_pid, &arg32->l_pid) ||
65 copy_from_user(bf->l_pad, &arg32->l_pad, 4*sizeof(u32)))
66 return -XFS_ERROR(EFAULT);
67 return 0;
68}
69
70STATIC int
71xfs_compat_ioc_fsgeometry_v1(
72 struct xfs_mount *mp,
73 compat_xfs_fsop_geom_v1_t __user *arg32)
74{
75 xfs_fsop_geom_t fsgeo;
76 int error;
77
78 error = xfs_fs_geometry(mp, &fsgeo, 3);
79 if (error)
80 return -error;
81
82 if (copy_to_user(arg32, &fsgeo, sizeof(struct compat_xfs_fsop_geom_v1)))
83 return -XFS_ERROR(EFAULT);
84 return 0;
85}
86
87STATIC int
88xfs_compat_growfs_data_copyin(
89 struct xfs_growfs_data *in,
90 compat_xfs_growfs_data_t __user *arg32)
91{
92 if (get_user(in->newblocks, &arg32->newblocks) ||
93 get_user(in->imaxpct, &arg32->imaxpct))
94 return -XFS_ERROR(EFAULT);
95 return 0;
96}
97
98STATIC int
99xfs_compat_growfs_rt_copyin(
100 struct xfs_growfs_rt *in,
101 compat_xfs_growfs_rt_t __user *arg32)
102{
103 if (get_user(in->newblocks, &arg32->newblocks) ||
104 get_user(in->extsize, &arg32->extsize))
105 return -XFS_ERROR(EFAULT);
106 return 0;
107}
108
109STATIC int
110xfs_inumbers_fmt_compat(
111 void __user *ubuffer,
112 const xfs_inogrp_t *buffer,
113 long count,
114 long *written)
115{
116 compat_xfs_inogrp_t __user *p32 = ubuffer;
117 long i;
118
119 for (i = 0; i < count; i++) {
120 if (put_user(buffer[i].xi_startino, &p32[i].xi_startino) ||
121 put_user(buffer[i].xi_alloccount, &p32[i].xi_alloccount) ||
122 put_user(buffer[i].xi_allocmask, &p32[i].xi_allocmask))
123 return -XFS_ERROR(EFAULT);
124 }
125 *written = count * sizeof(*p32);
126 return 0;
127}
128
129#else
130#define xfs_inumbers_fmt_compat xfs_inumbers_fmt
131#endif
132
133STATIC int
134xfs_ioctl32_bstime_copyin(
135 xfs_bstime_t *bstime,
136 compat_xfs_bstime_t __user *bstime32)
137{
138 compat_time_t sec32;
139
140 if (get_user(sec32, &bstime32->tv_sec) ||
141 get_user(bstime->tv_nsec, &bstime32->tv_nsec))
142 return -XFS_ERROR(EFAULT);
143 bstime->tv_sec = sec32;
144 return 0;
145}
146
147
148STATIC int
149xfs_ioctl32_bstat_copyin(
150 xfs_bstat_t *bstat,
151 compat_xfs_bstat_t __user *bstat32)
152{
153 if (get_user(bstat->bs_ino, &bstat32->bs_ino) ||
154 get_user(bstat->bs_mode, &bstat32->bs_mode) ||
155 get_user(bstat->bs_nlink, &bstat32->bs_nlink) ||
156 get_user(bstat->bs_uid, &bstat32->bs_uid) ||
157 get_user(bstat->bs_gid, &bstat32->bs_gid) ||
158 get_user(bstat->bs_rdev, &bstat32->bs_rdev) ||
159 get_user(bstat->bs_blksize, &bstat32->bs_blksize) ||
160 get_user(bstat->bs_size, &bstat32->bs_size) ||
161 xfs_ioctl32_bstime_copyin(&bstat->bs_atime, &bstat32->bs_atime) ||
162 xfs_ioctl32_bstime_copyin(&bstat->bs_mtime, &bstat32->bs_mtime) ||
163 xfs_ioctl32_bstime_copyin(&bstat->bs_ctime, &bstat32->bs_ctime) ||
164 get_user(bstat->bs_blocks, &bstat32->bs_size) ||
165 get_user(bstat->bs_xflags, &bstat32->bs_size) ||
166 get_user(bstat->bs_extsize, &bstat32->bs_extsize) ||
167 get_user(bstat->bs_extents, &bstat32->bs_extents) ||
168 get_user(bstat->bs_gen, &bstat32->bs_gen) ||
169 get_user(bstat->bs_projid, &bstat32->bs_projid) ||
170 get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask) ||
171 get_user(bstat->bs_dmstate, &bstat32->bs_dmstate) ||
172 get_user(bstat->bs_aextents, &bstat32->bs_aextents))
173 return -XFS_ERROR(EFAULT);
174 return 0;
175}
176
177
178
179STATIC int
180xfs_bstime_store_compat(
181 compat_xfs_bstime_t __user *p32,
182 const xfs_bstime_t *p)
183{
184 __s32 sec32;
185
186 sec32 = p->tv_sec;
187 if (put_user(sec32, &p32->tv_sec) ||
188 put_user(p->tv_nsec, &p32->tv_nsec))
189 return -XFS_ERROR(EFAULT);
190 return 0;
191}
192
193
/*
 * Output formatter for compat bulkstat: write one native xfs_bstat into
 * the user buffer in the 32-bit compat layout, field by field.
 *
 * Returns ENOMEM (positive, XFS convention) if the caller's buffer is too
 * small for one compat record, EFAULT on a faulting user access, and 0 on
 * success with *ubused (if supplied) set to the bytes consumed.
 */
STATIC int
xfs_bulkstat_one_fmt_compat(
	void			__user *ubuffer,
	int			ubsize,
	int			*ubused,
	const xfs_bstat_t	*buffer)
{
	compat_xfs_bstat_t	__user *p32 = ubuffer;

	if (ubsize < sizeof(*p32))
		return XFS_ERROR(ENOMEM);

	/* Each field stored individually; compat layout differs from native. */
	if (put_user(buffer->bs_ino, &p32->bs_ino) ||
	    put_user(buffer->bs_mode, &p32->bs_mode) ||
	    put_user(buffer->bs_nlink, &p32->bs_nlink) ||
	    put_user(buffer->bs_uid, &p32->bs_uid) ||
	    put_user(buffer->bs_gid, &p32->bs_gid) ||
	    put_user(buffer->bs_rdev, &p32->bs_rdev) ||
	    put_user(buffer->bs_blksize, &p32->bs_blksize) ||
	    put_user(buffer->bs_size, &p32->bs_size) ||
	    xfs_bstime_store_compat(&p32->bs_atime, &buffer->bs_atime) ||
	    xfs_bstime_store_compat(&p32->bs_mtime, &buffer->bs_mtime) ||
	    xfs_bstime_store_compat(&p32->bs_ctime, &buffer->bs_ctime) ||
	    put_user(buffer->bs_blocks, &p32->bs_blocks) ||
	    put_user(buffer->bs_xflags, &p32->bs_xflags) ||
	    put_user(buffer->bs_extsize, &p32->bs_extsize) ||
	    put_user(buffer->bs_extents, &p32->bs_extents) ||
	    put_user(buffer->bs_gen, &p32->bs_gen) ||
	    put_user(buffer->bs_projid, &p32->bs_projid) ||
	    put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) ||
	    put_user(buffer->bs_dmstate, &p32->bs_dmstate) ||
	    put_user(buffer->bs_aextents, &p32->bs_aextents))
		return XFS_ERROR(EFAULT);
	if (ubused)
		*ubused = sizeof(*p32);
	return 0;
}
231
/*
 * Compat wrapper for xfs_bulkstat_one_int(): identical to the native
 * xfs_bulkstat_one() except that it plugs in the 32-bit output formatter
 * so records land in the compat layout.
 */
STATIC int
xfs_bulkstat_one_compat(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* buffer to place output in */
	int		ubsize,		/* size of buffer */
	void		*private_data,	/* unused; kept for formatter ABI */
	xfs_daddr_t	bno,		/* starting bno of inode cluster */
	int		*ubused,	/* bytes used by me */
	void		*dibuff,	/* on-disk inode buffer */
	int		*stat)		/* BULKSTAT_RV_... */
{
	return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
				    xfs_bulkstat_one_fmt_compat, bno,
				    ubused, dibuff, stat);
}
248
249
/*
 * Compat handler for XFS_IOC_FSBULKSTAT_32, XFS_IOC_FSBULKSTAT_SINGLE_32
 * and XFS_IOC_FSINUMBERS_32.
 *
 * The 32-bit xfs_fsop_bulkreq stores its three embedded pointers
 * (lastip, ubuffer, ocount) as 32-bit values, so each is fetched as a
 * u32 and widened with compat_ptr() before use.  The inode cookie itself
 * (*lastip) is a 64-bit value in both ABIs and is copied directly.
 *
 * Returns 0 on success or a negative errno.
 */
STATIC int
xfs_compat_ioc_bulkstat(
	xfs_mount_t		  *mp,
	unsigned int		  cmd,
	compat_xfs_fsop_bulkreq_t __user *p32)
{
	u32			addr;
	xfs_fsop_bulkreq_t	bulkreq;
	int			count;	/* # of records returned */
	xfs_ino_t		inlast;	/* last inode number */
	int			done;
	int			error;

	/* should be called again (unused here, done = 1 unused) */
	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	/* widen each embedded 32-bit user pointer */
	if (get_user(addr, &p32->lastip))
		return -XFS_ERROR(EFAULT);
	bulkreq.lastip = compat_ptr(addr);
	if (get_user(bulkreq.icount, &p32->icount) ||
	    get_user(addr, &p32->ubuffer))
		return -XFS_ERROR(EFAULT);
	bulkreq.ubuffer = compat_ptr(addr);
	if (get_user(addr, &p32->ocount))
		return -XFS_ERROR(EFAULT);
	bulkreq.ocount = compat_ptr(addr);

	/* fetch the 64-bit starting-inode cookie from userspace */
	if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
		return -XFS_ERROR(EFAULT);

	if ((count = bulkreq.icount) <= 0)
		return -XFS_ERROR(EINVAL);

	if (bulkreq.ubuffer == NULL)
		return -XFS_ERROR(EINVAL);

	if (cmd == XFS_IOC_FSINUMBERS_32) {
		error = xfs_inumbers(mp, &inlast, &count,
				bulkreq.ubuffer, xfs_inumbers_fmt_compat);
	} else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE_32) {
		int res;

		error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer,
				sizeof(compat_xfs_bstat_t),
				NULL, 0, NULL, NULL, &res);
	} else if (cmd == XFS_IOC_FSBULKSTAT_32) {
		error = xfs_bulkstat(mp, &inlast, &count,
			xfs_bulkstat_one_compat, NULL,
			sizeof(compat_xfs_bstat_t), bulkreq.ubuffer,
			BULKSTAT_FG_QUICK, &done);
	} else
		error = XFS_ERROR(EINVAL);
	if (error)
		return -error;

	/*
	 * Write back the updated cookie and record count.  Both write-backs
	 * are skipped when the caller passed a NULL ocount pointer.
	 */
	if (bulkreq.ocount != NULL) {
		if (copy_to_user(bulkreq.lastip, &inlast,
						sizeof(xfs_ino_t)))
			return -XFS_ERROR(EFAULT);

		if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
			return -XFS_ERROR(EFAULT);
	}

	return 0;
}
322
/*
 * Copy a 32-bit compat xfs_fsop_handlereq in from userspace and convert
 * it to the native layout.  The struct is copied wholesale first, then
 * each embedded 32-bit user pointer is widened with compat_ptr().
 *
 * Returns 0 or -EFAULT if the initial copy faults.
 */
STATIC int
xfs_compat_handlereq_copyin(
	xfs_fsop_handlereq_t		*hreq,
	compat_xfs_fsop_handlereq_t	__user *arg32)
{
	compat_xfs_fsop_handlereq_t	hreq32;

	if (copy_from_user(&hreq32, arg32, sizeof(compat_xfs_fsop_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	hreq->fd = hreq32.fd;
	hreq->path = compat_ptr(hreq32.path);
	hreq->oflags = hreq32.oflags;
	hreq->ihandle = compat_ptr(hreq32.ihandle);
	hreq->ihandlen = hreq32.ihandlen;
	hreq->ohandle = compat_ptr(hreq32.ohandle);
	/* ohandlen is itself a user pointer (out-parameter), hence compat_ptr */
	hreq->ohandlen = compat_ptr(hreq32.ohandlen);

	return 0;
}
343
/*
 * Resolve the inode handle embedded in a compat handlereq to a dentry,
 * widening the 32-bit ihandle pointer before handing it to the common
 * xfs_handle_to_dentry() helper.  Returns a dentry or an ERR_PTR.
 */
STATIC struct dentry *
xfs_compat_handlereq_to_dentry(
	struct file		*parfilp,
	compat_xfs_fsop_handlereq_t *hreq)
{
	return xfs_handle_to_dentry(parfilp,
			compat_ptr(hreq->ihandle), hreq->ihandlen);
}
352
/*
 * Compat handler for XFS_IOC_ATTRLIST_BY_HANDLE_32: list extended
 * attributes of the inode named by a file handle, copying the result
 * back into the caller's 32-bit buffer.
 *
 * Returns 0 or a negative errno.
 */
STATIC int
xfs_compat_attrlist_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	attrlist_cursor_kern_t	*cursor;
	compat_xfs_fsop_attrlist_handlereq_t al_hreq;
	struct dentry		*dentry;
	char			*kbuf;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&al_hreq, arg,
			   sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
		return -XFS_ERROR(EFAULT);
	/*
	 * Bound the buffer size before kmalloc'ing it below.
	 * NOTE(review): there is no lower-bound check on buflen here; a
	 * tiny buflen is passed straight to xfs_attr_list() — confirm that
	 * the callee rejects undersized buffers.
	 */
	if (al_hreq.buflen > XATTR_LIST_MAX)
		return -XFS_ERROR(EINVAL);

	/* Reject flags other than the namespace selectors. */
	if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
		return -XFS_ERROR(EINVAL);

	dentry = xfs_compat_handlereq_to_dentry(parfilp, &al_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = -ENOMEM;
	kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
	if (!kbuf)
		goto out_dput;

	/* The opaque position blob doubles as the kernel cursor. */
	cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
	error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen,
					al_hreq.flags, cursor);
	if (error)
		goto out_kfree;

	if (copy_to_user(compat_ptr(al_hreq.buffer), kbuf, al_hreq.buflen))
		error = -EFAULT;

 out_kfree:
	kfree(kbuf);
 out_dput:
	dput(dentry);
	return error;
}
402
/*
 * Compat handler for XFS_IOC_ATTRMULTI_BY_HANDLE_32: perform a batch of
 * extended-attribute get/set/remove operations on the inode named by a
 * file handle.  Per-operation status is reported in each op's am_error
 * field; the function's own return value covers setup/teardown failures.
 *
 * Note the sign convention: "error" is kept positive inside this
 * function and negated once at the bottom.
 */
STATIC int
xfs_compat_attrmulti_by_handle(
	struct file				*parfilp,
	void					__user *arg)
{
	int					error;
	compat_xfs_attr_multiop_t		*ops;
	compat_xfs_fsop_attrmulti_handlereq_t	am_hreq;
	struct dentry				*dentry;
	unsigned int				i, size;
	char					*attr_name;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&am_hreq, arg,
			   sizeof(compat_xfs_fsop_attrmulti_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	dentry = xfs_compat_handlereq_to_dentry(parfilp, &am_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* Bound the op array size before copying it from userspace. */
	error = E2BIG;
	size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)
		goto out_dput;

	ops = memdup_user(compat_ptr(am_hreq.ops), size);
	if (IS_ERR(ops)) {
		/*
		 * NOTE(review): PTR_ERR() is already negative, but the
		 * final "return -error" negates it again, producing a
		 * positive return — looks like a sign bug; confirm against
		 * the file's error convention before relying on it.
		 */
		error = PTR_ERR(ops);
		goto out_dput;
	}

	attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL);
	if (!attr_name)
		goto out_kfree_ops;

	error = 0;
	for (i = 0; i < am_hreq.opcount; i++) {
		/* Copy in the attribute name; reject empty/overlong names. */
		ops[i].am_error = strncpy_from_user(attr_name,
				compat_ptr(ops[i].am_attrname),
				MAXNAMELEN);
		if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
			error = -ERANGE;
		if (ops[i].am_error < 0)
			break;

		switch (ops[i].am_opcode) {
		case ATTR_OP_GET:
			ops[i].am_error = xfs_attrmulti_attr_get(
					dentry->d_inode, attr_name,
					compat_ptr(ops[i].am_attrvalue),
					&ops[i].am_length, ops[i].am_flags);
			break;
		case ATTR_OP_SET:
			/* writes need mnt_want_write/mnt_drop_write bracketing */
			ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_set(
					dentry->d_inode, attr_name,
					compat_ptr(ops[i].am_attrvalue),
					ops[i].am_length, ops[i].am_flags);
			mnt_drop_write(parfilp->f_path.mnt);
			break;
		case ATTR_OP_REMOVE:
			ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
			if (ops[i].am_error)
				break;
			ops[i].am_error = xfs_attrmulti_attr_remove(
					dentry->d_inode, attr_name,
					ops[i].am_flags);
			mnt_drop_write(parfilp->f_path.mnt);
			break;
		default:
			ops[i].am_error = EINVAL;
		}
	}

	/* Copy the per-op results (am_error, am_length) back to userspace. */
	if (copy_to_user(compat_ptr(am_hreq.ops), ops, size))
		error = XFS_ERROR(EFAULT);

	kfree(attr_name);
 out_kfree_ops:
	kfree(ops);
 out_dput:
	dput(dentry);
	return -error;
}
491
/*
 * Compat handler for XFS_IOC_FSSETDM_BY_HANDLE_32: set the DMAPI event
 * mask and state on the inode named by a file handle.
 *
 * Returns 0 or a negative errno.
 */
STATIC int
xfs_compat_fssetdm_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	struct fsdmidata	fsd;
	compat_xfs_fsop_setdm_handlereq_t dmhreq;
	struct dentry		*dentry;

	if (!capable(CAP_MKNOD))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&dmhreq, arg,
			   sizeof(compat_xfs_fsop_setdm_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	dentry = xfs_compat_handlereq_to_dentry(parfilp, &dmhreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* DMAPI state may not be changed on immutable/append-only inodes. */
	if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) {
		error = -XFS_ERROR(EPERM);
		goto out;
	}

	if (copy_from_user(&fsd, compat_ptr(dmhreq.data), sizeof(fsd))) {
		error = -XFS_ERROR(EFAULT);
		goto out;
	}

	error = -xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask,
				 fsd.fsd_dmstate);

out:
	dput(dentry);
	return error;
}
529
/*
 * Main 32-bit compat ioctl dispatcher for XFS files.
 *
 * Three classes of command are handled:
 *  - commands whose layout is identical in both ABIs are forwarded
 *    straight to xfs_file_ioctl();
 *  - on BROKEN_X86_ALIGNMENT platforms, commands whose 32-bit structure
 *    layout differs are translated field by field and the command number
 *    rebuilt with _NATIVE_IOC();
 *  - the remaining *_32 commands get dedicated compat helpers above.
 *
 * Returns 0/positive per-command result or a negative errno;
 * -ENOIOCTLCMD for unknown commands.
 */
long
xfs_file_compat_ioctl(
	struct file		*filp,
	unsigned		cmd,
	unsigned long		p)
{
	struct inode		*inode = filp->f_path.dentry->d_inode;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	void			__user *arg = (void __user *)p;
	int			ioflags = 0;
	int			error;

	/* FMODE_NOCMTIME callers must not have ctime/mtime updated */
	if (filp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	xfs_itrace_entry(ip);

	switch (cmd) {
	/* No size or alignment issues on any arch: forward unchanged. */
	case XFS_IOC_DIOINFO:
	case XFS_IOC_FSGEOMETRY:
	case XFS_IOC_FSGETXATTR:
	case XFS_IOC_FSSETXATTR:
	case XFS_IOC_FSGETXATTRA:
	case XFS_IOC_FSSETDM:
	case XFS_IOC_GETBMAP:
	case XFS_IOC_GETBMAPA:
	case XFS_IOC_GETBMAPX:
	case XFS_IOC_FSCOUNTS:
	case XFS_IOC_SET_RESBLKS:
	case XFS_IOC_GET_RESBLKS:
	case XFS_IOC_FSGROWFSLOG:
	case XFS_IOC_GOINGDOWN:
	case XFS_IOC_ERROR_INJECTION:
	case XFS_IOC_ERROR_CLEARALL:
		return xfs_file_ioctl(filp, cmd, p);
#ifndef BROKEN_X86_ALIGNMENT
	/* These are handled fine if no alignment issues exist. */
	case XFS_IOC_ALLOCSP:
	case XFS_IOC_FREESP:
	case XFS_IOC_RESVSP:
	case XFS_IOC_UNRESVSP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP64:
	case XFS_IOC_RESVSP64:
	case XFS_IOC_UNRESVSP64:
	case XFS_IOC_FSGEOMETRY_V1:
	case XFS_IOC_FSGROWFSDATA:
	case XFS_IOC_FSGROWFSRT:
		return xfs_file_ioctl(filp, cmd, p);
#else
	/*
	 * x86 compat: the 32-bit structures are packed differently, so
	 * each argument must be translated before calling the native path.
	 */
	case XFS_IOC_ALLOCSP_32:
	case XFS_IOC_FREESP_32:
	case XFS_IOC_ALLOCSP64_32:
	case XFS_IOC_FREESP64_32:
	case XFS_IOC_RESVSP_32:
	case XFS_IOC_UNRESVSP_32:
	case XFS_IOC_RESVSP64_32:
	case XFS_IOC_UNRESVSP64_32: {
		struct xfs_flock64	bf;

		if (xfs_compat_flock64_copyin(&bf, arg))
			return -XFS_ERROR(EFAULT);
		cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
		return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf);
	}
	case XFS_IOC_FSGEOMETRY_V1_32:
		return xfs_compat_ioc_fsgeometry_v1(mp, arg);
	case XFS_IOC_FSGROWFSDATA_32: {
		struct xfs_growfs_data	in;

		if (xfs_compat_growfs_data_copyin(&in, arg))
			return -XFS_ERROR(EFAULT);
		error = xfs_growfs_data(mp, &in);
		return -error;
	}
	case XFS_IOC_FSGROWFSRT_32: {
		struct xfs_growfs_rt	in;

		if (xfs_compat_growfs_rt_copyin(&in, arg))
			return -XFS_ERROR(EFAULT);
		error = xfs_growfs_rt(mp, &in);
		return -error;
	}
#endif
	/* long changes size between 32/64-bit: rebuild the command number. */
	case XFS_IOC_GETXFLAGS_32:
	case XFS_IOC_SETXFLAGS_32:
	case XFS_IOC_GETVERSION_32:
		cmd = _NATIVE_IOC(cmd, long);
		return xfs_file_ioctl(filp, cmd, p);
	case XFS_IOC_SWAPEXT_32: {
		struct xfs_swapext	  sxp;
		struct compat_xfs_swapext __user *sxu = arg;

		/* Bulk copy everything up to sx_stat, then translate sx_stat. */
		if (copy_from_user(&sxp, sxu,
				   offsetof(struct xfs_swapext, sx_stat)) ||
		    xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat))
			return -XFS_ERROR(EFAULT);
		error = xfs_swapext(&sxp);
		return -error;
	}
	case XFS_IOC_FSBULKSTAT_32:
	case XFS_IOC_FSBULKSTAT_SINGLE_32:
	case XFS_IOC_FSINUMBERS_32:
		return xfs_compat_ioc_bulkstat(mp, cmd, arg);
	case XFS_IOC_FD_TO_HANDLE_32:
	case XFS_IOC_PATH_TO_HANDLE_32:
	case XFS_IOC_PATH_TO_FSHANDLE_32: {
		struct xfs_fsop_handlereq	hreq;

		if (xfs_compat_handlereq_copyin(&hreq, arg))
			return -XFS_ERROR(EFAULT);
		cmd = _NATIVE_IOC(cmd, struct xfs_fsop_handlereq);
		return xfs_find_handle(cmd, &hreq);
	}
	case XFS_IOC_OPEN_BY_HANDLE_32: {
		struct xfs_fsop_handlereq	hreq;

		if (xfs_compat_handlereq_copyin(&hreq, arg))
			return -XFS_ERROR(EFAULT);
		return xfs_open_by_handle(filp, &hreq);
	}
	case XFS_IOC_READLINK_BY_HANDLE_32: {
		struct xfs_fsop_handlereq	hreq;

		if (xfs_compat_handlereq_copyin(&hreq, arg))
			return -XFS_ERROR(EFAULT);
		return xfs_readlink_by_handle(filp, &hreq);
	}
	case XFS_IOC_ATTRLIST_BY_HANDLE_32:
		return xfs_compat_attrlist_by_handle(filp, arg);
	case XFS_IOC_ATTRMULTI_BY_HANDLE_32:
		return xfs_compat_attrmulti_by_handle(filp, arg);
	case XFS_IOC_FSSETDM_BY_HANDLE_32:
		return xfs_compat_fssetdm_by_handle(filp, arg);
	default:
		return -XFS_ERROR(ENOIOCTLCMD);
	}
}
672