/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#ifndef _LINUX_SKMSG_H
#define _LINUX_SKMSG_H

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/strparser.h>

#define MAX_MSG_FRAGS			MAX_SKB_FRAGS
#define NR_MSG_FRAG_IDS			(MAX_MSG_FRAGS + 1)

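/* Verdict returned by a BPF verdict program for a msg or skb; __SK_NONE
 * means no verdict has been evaluated yet.
 */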
enum __sk_action {
	__SK_DROP = 0,
	__SK_PASS,
	__SK_REDIRECT,
	__SK_NONE,
};

struct sk_msg_sg {
	u32				start;
	u32				curr;
	u32				end;
	u32				size;
	u32				copybreak;
	DECLARE_BITMAP(copy, MAX_MSG_FRAGS + 2);
	/* The extra two elements:
	 * 1) used for chaining the front and sections when the list becomes
	 *    partitioned (e.g. end < start). The crypto APIs require the
	 *    chaining;
	 * 2) to chain tailer SG entries after the message.
	 */
	struct scatterlist		data[MAX_MSG_FRAGS + 2];
};

/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
	struct sk_msg_sg		sg;
	void				*data;
	void				*data_end;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				flags;
	struct sk_buff			*skb;
	struct sock			*sk_redir;
	struct sock			*sk;
	struct list_head		list;
};

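/* BPF programs attachable to a psock. stream_parser/stream_verdict pair up
 * for the strparser path, skb_verdict runs on skbs without a parser, and
 * msg_parser applies verdicts on the sendmsg path.
 */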
struct sk_psock_progs {
	struct bpf_prog			*msg_parser;
	struct bpf_prog			*stream_parser;
	struct bpf_prog			*stream_verdict;
	struct bpf_prog			*skb_verdict;
};

enum sk_psock_state_bits {
	SK_PSOCK_TX_ENABLED,
};

struct sk_psock_link {
	struct list_head		list;
	struct bpf_map			*map;
	void				*link_raw;
};

struct sk_psock_work_state {
	struct sk_buff			*skb;
	u32				len;
	u32				off;
};

struct sk_psock {
	struct sock			*sk;
	struct sock			*sk_redir;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				eval;
	struct sk_msg			*cork;
	struct sk_psock_progs		progs;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	struct strparser		strp;
#endif
	struct sk_buff_head		ingress_skb;
	struct list_head		ingress_msg;
	spinlock_t			ingress_lock;
	unsigned long			state;
	struct list_head		link;
	spinlock_t			link_lock;
	refcount_t			refcnt;
	void (*saved_unhash)(struct sock *sk);
	void (*saved_close)(struct sock *sk, long timeout);
	void (*saved_write_space)(struct sock *sk);
	void (*saved_data_ready)(struct sock *sk);
	int  (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock,
				     bool restore);
	struct proto			*sk_proto;
	struct mutex			work_mutex;
	struct sk_psock_work_state	work_state;
	struct work_struct		work;
	struct rcu_work			rwork;
};

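/* sk_msg memory management. Allocation and free charge/uncharge the
 * socket's memory accounting; the *_nocharge variants skip the uncharge
 * and the sk_msg_return*() helpers only adjust the accounting.
 */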
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce);
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len);
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
int sk_msg_free(struct sock *sk, struct sk_msg *msg);
int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes);

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes);
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags);
bool sk_msg_is_readable(struct sock *sk);

static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
	WARN_ON(i == msg->sg.end && bytes);
}

static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
{
	if (psock->apply_bytes) {
		if (psock->apply_bytes < bytes)
			psock->apply_bytes = 0;
		else
			psock->apply_bytes -= bytes;
	}
}

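/* Scatterlist indices form a ring of NR_MSG_FRAG_IDS slots, so the distance
 * must account for end wrapping below start. E.g. with a typical
 * NR_MSG_FRAG_IDS of 18 (MAX_SKB_FRAGS 17 plus one), dist(16, 2) is
 * 2 + (18 - 16) = 4.
 */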
static inline u32 sk_msg_iter_dist(u32 start, u32 end)
{
	return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start);
}

#define sk_msg_iter_var_prev(var)			\
	do {						\
		if (var == 0)				\
			var = NR_MSG_FRAG_IDS - 1;	\
		else					\
			var--;				\
	} while (0)

#define sk_msg_iter_var_next(var)			\
	do {						\
		var++;					\
		if (var == NR_MSG_FRAG_IDS)		\
			var = 0;			\
	} while (0)

#define sk_msg_iter_prev(msg, which)			\
	sk_msg_iter_var_prev(msg->sg.which)

#define sk_msg_iter_next(msg, which)			\
	sk_msg_iter_var_next(msg->sg.which)

static inline void sk_msg_init(struct sk_msg *msg)
{
	BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
	memset(msg, 0, sizeof(*msg));
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
}

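/* Move @size bytes of element @which from @src to @dst, trimming the source
 * element in place; @which is assumed to be a populated slot in both rings.
 */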
static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
			       int which, u32 size)
{
	dst->sg.data[which] = src->sg.data[which];
	dst->sg.data[which].length = size;
	dst->sg.size += size;
	src->sg.size -= size;
	src->sg.data[which].length -= size;
	src->sg.data[which].offset += size;
}

static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
{
	memcpy(dst, src, sizeof(*src));
	sk_msg_init(src);
}

static inline bool sk_msg_full(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS;
}

static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
}

static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
{
	return &msg->sg.data[which];
}

static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which)
{
	return msg->sg.data[which];
}

static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
{
	return sg_page(sk_msg_elem(msg, which));
}

static inline bool sk_msg_to_ingress(const struct sk_msg *msg)
{
	return msg->flags & BPF_F_INGRESS;
}

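/* Publish the first element to BPF via msg->data/msg->data_end. If the
 * element is flagged in the copy bitmap the pointers are cleared, so a
 * program has to pull the data (e.g. bpf_msg_pull_data()) before reading it.
 */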
static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
	struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);

	if (test_bit(msg->sg.start, msg->sg.copy)) {
		msg->data = NULL;
		msg->data_end = NULL;
	} else {
		msg->data = sg_virt(sge);
		msg->data_end = msg->data + sge->length;
	}
}

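/* Append a page fragment at the end slot of the ring. A reference on @page
 * is taken, and the slot is flagged in the copy bitmap, which also keeps
 * sk_msg_compute_data_pointers() from exposing it directly to BPF.
 */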
static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
				   u32 len, u32 offset)
{
	struct scatterlist *sge;

	get_page(page);
	sge = sk_msg_elem(msg, msg->sg.end);
	sg_set_page(sge, page, len, offset);
	sg_unmark_end(sge);

	__set_bit(msg->sg.end, msg->sg.copy);
	msg->sg.size += len;
	sk_msg_iter_next(msg, end);
}

static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
	do {
		if (copy_state)
			__set_bit(i, msg->sg.copy);
		else
			__clear_bit(i, msg->sg.copy);
		sk_msg_iter_var_next(i);
		if (i == msg->sg.end)
			break;
	} while (1);
}

static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, true);
}

static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, false);
}

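/* Fetch the psock attached to @sk. The psock lives in sk_user_data under
 * RCU, so callers must be in an RCU read-side section.
 */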
static inline struct sk_psock *sk_psock(const struct sock *sk)
{
	return rcu_dereference_sk_user_data(sk);
}

static inline void sk_psock_set_state(struct sk_psock *psock,
				      enum sk_psock_state_bits bit)
{
	set_bit(bit, &psock->state);
}

static inline void sk_psock_clear_state(struct sk_psock *psock,
					enum sk_psock_state_bits bit)
{
	clear_bit(bit, &psock->state);
}

static inline bool sk_psock_test_state(const struct sk_psock *psock,
				       enum sk_psock_state_bits bit)
{
	return test_bit(bit, &psock->state);
}

static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	kfree_skb(skb);
}

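/* Ingress msg queue helpers; all take ingress_lock. Once TX is disabled
 * (psock teardown) queueing is refused and the msg is freed immediately.
 */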
static inline void sk_psock_queue_msg(struct sk_psock *psock,
				      struct sk_msg *msg)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
		list_add_tail(&msg->list, &psock->ingress_msg);
	else {
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
	spin_unlock_bh(&psock->ingress_lock);
}

static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock,
					       struct sk_msg *msg)
{
	struct sk_msg *ret;

	spin_lock_bh(&psock->ingress_lock);
	if (list_is_last(&msg->list, &psock->ingress_msg))
		ret = NULL;
	else
		ret = list_next_entry(msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return ret;
}

static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
{
	return psock ? list_empty(&psock->ingress_msg) : true;
}

static inline void kfree_sk_msg(struct sk_msg *msg)
{
	if (msg->skb)
		consume_skb(msg->skb);
	kfree(msg);
}

static inline void sk_psock_report_error(struct sk_psock *psock, int err)
{
	struct sock *sk = psock->sk;

	sk->sk_err = err;
	sk_error_report(sk);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node);
void sk_psock_stop(struct sk_psock *psock, bool wait);

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
#else
static inline int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	return -EOPNOTSUPP;
}

static inline void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
}

static inline void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
}
#endif

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg);

static inline struct sk_psock_link *sk_psock_init_link(void)
{
	return kzalloc(sizeof(struct sk_psock_link),
		       GFP_ATOMIC | __GFP_NOWARN);
}

static inline void sk_psock_free_link(struct sk_psock_link *link)
{
	kfree(link);
}

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);

static inline void sk_psock_cork_free(struct sk_psock *psock)
{
	if (psock->cork) {
		sk_msg_free(psock->sk, psock->cork);
		kfree(psock->cork);
		psock->cork = NULL;
	}
}

static inline void sk_psock_restore_proto(struct sock *sk,
					  struct sk_psock *psock)
{
	if (psock->psock_update_sk_prot)
		psock->psock_update_sk_prot(sk, psock, true);
}

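/* Take a reference on the attached psock, or return NULL if none is
 * attached or it is already being destroyed. Typical usage:
 *
 *	psock = sk_psock_get(sk);
 *	if (psock) {
 *		...
 *		sk_psock_put(sk, psock);
 *	}
 */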
static inline struct sk_psock *sk_psock_get(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock && !refcount_inc_not_zero(&psock->refcnt))
		psock = NULL;
	rcu_read_unlock();
	return psock;
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock);

static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
{
	if (refcount_dec_and_test(&psock->refcnt))
		sk_psock_drop(sk, psock);
}

static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		psock->saved_data_ready(sk);
	else
		sk->sk_data_ready(sk);
}

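/* Program slots are updated with xchg()/cmpxchg() so concurrent
 * attach/detach is safe; the displaced program's reference is dropped here.
 */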
static inline void psock_set_prog(struct bpf_prog **pprog,
				  struct bpf_prog *prog)
{
	prog = xchg(pprog, prog);
	if (prog)
		bpf_prog_put(prog);
}

static inline int psock_replace_prog(struct bpf_prog **pprog,
				     struct bpf_prog *prog,
				     struct bpf_prog *old)
{
	if (cmpxchg(pprog, old, prog) != old)
		return -ENOENT;

	if (old)
		bpf_prog_put(old);

	return 0;
}

static inline void psock_progs_drop(struct sk_psock_progs *progs)
{
	psock_set_prog(&progs->msg_parser, NULL);
	psock_set_prog(&progs->stream_parser, NULL);
	psock_set_prog(&progs->stream_verdict, NULL);
	psock_set_prog(&progs->skb_verdict, NULL);
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb);

static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
{
	if (!psock)
		return false;
	return !!psock->saved_data_ready;
}

static inline bool sk_is_udp(const struct sock *sk)
{
	return sk->sk_type == SOCK_DGRAM &&
	       sk->sk_protocol == IPPROTO_UDP;
}

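/* The helpers below stash the BPF redirect target in skb->_sk_redir: the
 * low bits carry BPF_F_INGRESS and BPF_F_STRPARSER, the remaining bits hold
 * the struct sock pointer, hence BPF_F_PTR_MASK strips the flags before the
 * pointer is used.
 */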
#if IS_ENABLED(CONFIG_NET_SOCK_MSG)

#define BPF_F_STRPARSER	(1UL << 1)

/* We only have two bits so far. */
#define BPF_F_PTR_MASK ~(BPF_F_INGRESS | BPF_F_STRPARSER)

static inline bool skb_bpf_strparser(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_STRPARSER;
}

static inline void skb_bpf_set_strparser(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_STRPARSER;
}

static inline bool skb_bpf_ingress(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_INGRESS;
}

static inline void skb_bpf_set_ingress(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_INGRESS;
}

static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir,
				     bool ingress)
{
	skb->_sk_redir = (unsigned long)sk_redir;
	if (ingress)
		skb->_sk_redir |= BPF_F_INGRESS;
}

static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return (struct sock *)(sk_redir & BPF_F_PTR_MASK);
}

static inline void skb_bpf_redirect_clear(struct sk_buff *skb)
{
	skb->_sk_redir = 0;
}
#endif /* CONFIG_NET_SOCK_MSG */
#endif /* _LINUX_SKMSG_H */
557