/*
 * arch/xtensa/kernel/syscall.c
 *
 * Xtensa-specific system call glue: the syscall dispatch table plus a few
 * wrappers that adapt argument layouts and honour SHMLBA cache-aliasing
 * constraints.
 */

#include <linux/uaccess.h>
#include <asm/syscall.h>
#include <linux/linkage.h>
#include <linux/stringify.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/shm.h>

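/*
 * The syscall table is generated from the architecture's syscall_table.h:
 * each __SYSCALL(nr, entry) expansion drops the handler's address into the
 * next slot of sys_call_table (nr itself is unused here; the entries are
 * emitted in numerical order).
 */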
syscall_t sys_call_table[] = {
#define __SYSCALL(nr, entry) (syscall_t)entry,
#include <asm/syscall_table.h>
};

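/*
 * Align a user address for a shared mapping: round addr up to an SHMLBA
 * boundary, then add the file offset's "colour" within SHMLBA so that
 * mappings of the same page land on the same cache colour.
 */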
#define COLOUR_ALIGN(addr, pgoff) \
        ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
         (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

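/*
 * shmat() wrapper: do_shmat() reports the mapped address through a pointer,
 * so translate that into the usual "negative errno or address" return value.
 */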
asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
{
        unsigned long ret;
        long err;

        err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
        if (err)
                return err;
        return (long)ret;
}

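/*
 * fadvise64_64 wrapper: the Xtensa syscall takes advice as the second
 * argument (presumably so the 64-bit offset and len stay in aligned
 * register pairs); reorder and forward to the generic ksys_fadvise64_64().
 */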
asmlinkage long xtensa_fadvise64_64(int fd, int advice,
                unsigned long long offset, unsigned long long len)
{
        return ksys_fadvise64_64(fd, offset, len, advice);
}

#ifdef CONFIG_MMU
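/*
 * Pick an unmapped area for mmap(): walk the VMA list upwards from the hint
 * (or TASK_UNMAPPED_BASE) until a large-enough gap is found, keeping shared
 * mappings aligned to SHMLBA to avoid cache-aliasing problems.
 */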
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct vm_area_struct *vmm;

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
                        return -EINVAL;
                return addr;
        }

        if (len > TASK_SIZE)
                return -ENOMEM;
        if (!addr)
                addr = TASK_UNMAPPED_BASE;

        if (flags & MAP_SHARED)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
                /* At this point: (!vmm || addr < vmm->vm_end). */
                if (TASK_SIZE - len < addr)
                        return -ENOMEM;
                if (!vmm || addr + len <= vm_start_gap(vmm))
                        return addr;
                addr = vmm->vm_end;
                if (flags & MAP_SHARED)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}
#endif