// SPDX-License-Identifier: GPL-2.0
/*
 * queue_stack_maps.c: BPF queue and stack maps
 *
 * Copyright (c) 2018 Politecnico di Torino
 */
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/btf_ids.h>
#include "percpu_freelist.h"

#define QUEUE_STACK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

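/*
 * A queue or stack map is backed by a circular buffer of max_entries + 1
 * fixed-size slots.  head indexes the next slot to be written and tail the
 * oldest stored element; keeping one slot unused lets head == tail mean
 * "empty" rather than "full".  BPF programs operate on the map through the
 * bpf_map_push_elem(), bpf_map_pop_elem() and bpf_map_peek_elem() helpers.
 */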
struct bpf_queue_stack {
	struct bpf_map map;
	raw_spinlock_t lock;
	u32 head, tail;
	u32 size; /* max_entries + 1 */

	char elements[] __aligned(8);
};

static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
{
	return container_of(map, struct bpf_queue_stack, map);
}

static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
{
	return qs->head == qs->tail;
}

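/*
 * The buffer is full when advancing head by one (with wrap-around) would
 * make it collide with tail; the slot left unused is what distinguishes a
 * full buffer from an empty one.
 */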
static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
{
	u32 head = qs->head + 1;

	if (unlikely(head >= qs->size))
		head = 0;

	return head == qs->tail;
}

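/* Called from syscall */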
static int queue_stack_map_alloc_check(union bpf_attr *attr)
{
	if (!bpf_capable())
		return -EPERM;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 0 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able
		 * to access the elements.
		 */
		return -E2BIG;

	return 0;
}

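/*
 * Called from syscall.  One slot is allocated on top of max_entries so that
 * a completely full buffer can still be told apart from an empty one.
 */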
static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_queue_stack *qs;
	u64 size, queue_size;

	size = (u64) attr->max_entries + 1;
	queue_size = sizeof(*qs) + size * attr->value_size;

	qs = bpf_map_area_alloc(queue_size, numa_node);
	if (!qs)
		return ERR_PTR(-ENOMEM);

	memset(qs, 0, sizeof(*qs));

	bpf_map_init_from_attr(&qs->map, attr);

	qs->size = size;

	raw_spin_lock_init(&qs->lock);

	return &qs->map;
}

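/* Called when the map's refcount drops to zero */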
static void queue_stack_map_free(struct bpf_map *map)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);

	bpf_map_area_free(qs);
}

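/*
 * Common FIFO path for queue maps: copy out the oldest element (at tail)
 * and, on pop, advance tail with wrap-around.
 */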
static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	ptr = &qs->elements[qs->tail * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete) {
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

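/*
 * Common LIFO path for stack maps: copy out the newest element (just below
 * head, wrapping to the last slot when head is 0) and, on pop, move head
 * back onto it.
 */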
static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;
	u32 index;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	index = qs->head - 1;
	if (unlikely(index >= qs->size))
		index = qs->size - 1;

	ptr = &qs->elements[index * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete)
		qs->head = index;

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

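/* Called from syscall or from eBPF program */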
static int queue_map_peek_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, false);
}

static int stack_map_peek_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, false);
}

static int queue_map_pop_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, true);
}

static int stack_map_pop_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, true);
}

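/*
 * Push for both queue and stack maps: write into the slot at head and advance
 * head.  With BPF_EXIST, a full buffer makes room by dropping the oldest
 * element (advancing tail) instead of failing with -E2BIG.
 */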
static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
				     u64 flags)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long irq_flags;
	int err = 0;
	void *dst;

	/* BPF_EXIST is used to force making room for a new element in case the
	 * map is full
	 */
	bool replace = (flags & BPF_EXIST);

	/* Check supported flags for queue and stack maps */
	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
		return -EINVAL;

	raw_spin_lock_irqsave(&qs->lock, irq_flags);

	if (queue_stack_map_is_full(qs)) {
		if (!replace) {
			err = -E2BIG;
			goto out;
		}

		/* Advance tail pointer to overwrite oldest element */
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

	dst = &qs->elements[qs->head * qs->map.value_size];
	memcpy(dst, value, qs->map.value_size);

	if (unlikely(++qs->head >= qs->size))
		qs->head = 0;

out:
	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
	return err;
}

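/*
 * Queue and stack maps have no per-key semantics, so the generic key-based
 * lookup, update, delete and get_next_key operations below are stubbed out.
 */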
static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

static int queue_stack_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 flags)
{
	return -EINVAL;
}

static int queue_stack_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	return -EINVAL;
}

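/*
 * Queue and stack maps share the allocation, free and push paths; they differ
 * only in whether pop/peek operate on the tail (FIFO) or the head (LIFO).
 */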
BTF_ID_LIST_SINGLE(queue_map_btf_ids, struct, bpf_queue_stack)
const struct bpf_map_ops queue_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = queue_map_pop_elem,
	.map_peek_elem = queue_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_id = &queue_map_btf_ids[0],
};

const struct bpf_map_ops stack_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = stack_map_pop_elem,
	.map_peek_elem = stack_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
	.map_btf_id = &queue_map_btf_ids[0],
};