1
2
3
4
5
6
7
8#include <linux/atomic.h>
9#include <linux/seqlock.h>
10#include <net/net_namespace.h>
11#include <net/netns/generic.h>
12#include <net/sock.h>
13#include <net/af_rxrpc.h>
14#include "protocol.h"
15
/*
 * Debug-only sanity check that an object does not carry the slab
 * POISON_FREE pattern (i.e. was not read out of a freed slab object).
 * Compiled out by default via "#if 0"; flip to "#if 1" when chasing
 * use-after-free bugs.
 */
#if 0
#define CHECK_SLAB_OKAY(X) \
	BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
	       (POISON_FREE << 8 | POISON_FREE))
#else
#define CHECK_SLAB_OKAY(X) do {} while (0)
#endif
23
#define FCRYPT_BSIZE 8	/* fcrypt block size in bytes */

/*
 * One fcrypt-sized block of keying/checksum material, viewable either
 * as raw bytes or as two big-endian 32-bit words.  8-byte aligned for
 * the benefit of the crypto layer.
 */
struct rxrpc_crypt {
	union {
		u8	x[FCRYPT_BSIZE];
		__be32	n[2];
	};
} __attribute__((aligned(8)));

/* Queue work items on the rxrpc-private workqueue. */
#define rxrpc_queue_work(WS)	queue_work(rxrpc_workqueue, (WS))
#define rxrpc_queue_delayed_work(WS,D)	\
	queue_delayed_work(rxrpc_workqueue, (WS), (D))
35
36struct rxrpc_connection;
37
38
39
40
41
/*
 * Mark applied to socket buffers queued for rejection, selecting the
 * kind of response to generate (see rxrpc_reject_packets()).
 */
enum rxrpc_skb_mark {
	RXRPC_SKB_MARK_REJECT_BUSY,	/* Reject with BUSY */
	RXRPC_SKB_MARK_REJECT_ABORT,	/* Reject with ABORT */
};

/*
 * sk_state values for an RxRPC socket.
 */
enum {
	RXRPC_UNBOUND = 0,
	RXRPC_CLIENT_UNBOUND,		/* Unbound socket used as client */
	RXRPC_CLIENT_BOUND,		/* client local address bound */
	RXRPC_SERVER_BOUND,		/* server local address bound */
	RXRPC_SERVER_BOUND2,		/* second server local address bound */
	RXRPC_SERVER_LISTENING,		/* server listening for connections */
	RXRPC_SERVER_LISTEN_DISABLED,	/* server listening disabled */
	RXRPC_CLOSE,			/* socket is being closed */
};
60
61
62
63
/*
 * Per-network-namespace RxRPC state (one instance per struct net; see
 * rxrpc_net() below for the accessor).
 */
struct rxrpc_net {
	struct proc_dir_entry	*proc_net;	/* subdir under /proc/net */
	u32			epoch;		/* local epoch for detecting local-end reset */
	struct list_head	calls;		/* List of calls active in this namespace */
	rwlock_t		call_lock;	/* Lock for ->calls */
	atomic_t		nr_calls;	/* Count of allocated calls */

	atomic_t		nr_conns;	/* Count of allocated connections */
	struct list_head	conn_proc_list;	/* List of conns in this namespace for proc */
	struct list_head	service_conns;	/* Service conns in this namespace */
	rwlock_t		conn_lock;	/* Lock for ->conn_proc_list, ->service_conns */
	struct work_struct	service_conn_reaper;
	struct timer_list	service_conn_reap_timer;

	unsigned int		nr_client_conns;
	unsigned int		nr_active_client_conns;
	bool			kill_all_client_conns;
	bool			live;
	spinlock_t		client_conn_cache_lock; /* Lock for ->*_client_conns */
	spinlock_t		client_conn_discard_lock; /* Prevent multiple discarders */
	struct list_head	waiting_client_conns;
	struct list_head	active_client_conns;
	struct list_head	idle_client_conns;
	struct work_struct	client_conn_reaper;
	struct timer_list	client_conn_reap_timer;

	struct list_head	local_endpoints;
	struct mutex		local_mutex;	/* Lock for ->local_endpoints */

	DECLARE_HASHTABLE	(peer_hash, 10);
	spinlock_t		peer_hash_lock;	/* Lock for ->peer_hash */

#define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */
	u8			peer_keepalive_cursor;
	time64_t		peer_keepalive_base;
	struct list_head	peer_keepalive[32];	/* Time-bucketed keepalive lists */
	struct list_head	peer_keepalive_new;
	struct timer_list	peer_keepalive_timer;
	struct work_struct	peer_keepalive_work;
};
104
105
106
107
108
109
110
111
112
/*
 * Service backlog preallocation: rings of peers, connections and calls
 * set aside in advance so that incoming calls can be set up without
 * allocating at packet-reception time.  The head/tail pairs appear to
 * be ring indices into the arrays below — confirm against the
 * producer/consumer in the accept path.
 */
struct rxrpc_backlog {
	unsigned short		peer_backlog_head;
	unsigned short		peer_backlog_tail;
	unsigned short		conn_backlog_head;
	unsigned short		conn_backlog_tail;
	unsigned short		call_backlog_head;
	unsigned short		call_backlog_tail;
#define RXRPC_BACKLOG_MAX	32	/* Ring capacity for each object type */
	struct rxrpc_peer	*peer_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_connection	*conn_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_call	*call_backlog[RXRPC_BACKLOG_MAX];
};
125
126
127
128
/*
 * RxRPC socket definition.
 */
struct rxrpc_sock {
	/* WARNING: sk must be the first member — rxrpc_sk() below converts
	 * a struct sock pointer back with container_of(). */
	struct sock		sk;
	rxrpc_notify_new_call_t	notify_new_call; /* Func to notify of new call */
	rxrpc_discard_new_call_t discard_new_call; /* Func to discard a new call */
	struct rxrpc_local	*local;		/* local endpoint */
	struct rxrpc_backlog	*backlog;	/* Preallocation for services */
	spinlock_t		incoming_lock;	/* Incoming call vs service shutdown lock */
	struct list_head	sock_calls;	/* List of calls owned by this socket */
	struct list_head	to_be_accepted;	/* calls awaiting acceptance */
	struct list_head	recvmsg_q;	/* Calls awaiting recvmsg's attention */
	rwlock_t		recvmsg_lock;	/* Lock for recvmsg_q */
	struct key		*key;		/* security for this socket */
	struct key		*securities;	/* list of server security descriptors */
	struct rb_root		calls;		/* User ID -> call mapping */
	unsigned long		flags;
#define RXRPC_SOCK_CONNECTED		0	/* connect_srx is set */
	rwlock_t		call_lock;	/* lock for calls */
	u32			min_sec_level;	/* minimum security level */
#define RXRPC_SECURITY_MAX	RXRPC_SECURITY_ENCRYPT
	bool			exclusive;	/* Exclusive connection for a client socket */
	u16			second_service;	/* Additional service bound to the endpoint */
	struct {
		/* Service upgrade information */
		u16		from;		/* Service ID to upgrade (if not 0) */
		u16		to;		/* Service ID to upgrade to */
	} service_upgrade;
	sa_family_t		family;		/* Protocol family created with */
	struct sockaddr_rxrpc	srx;		/* Primary service/local address */
	struct sockaddr_rxrpc	connect_srx;	/* Default client address from connect() */
};

#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)
162
163
164
165
/*
 * RxRPC packet header in host byte order, unpacked from the on-wire
 * form (see "protocol.h").
 */
struct rxrpc_host_header {
	u32		epoch;		/* client boot timestamp */
	u32		cid;		/* connection and channel ID */
	u32		callNumber;	/* call ID (0 for connection-level packets) */
	u32		seq;		/* sequence number of packet in call */
	u32		serial;		/* serial number of packet sent */
	u8		type;		/* packet type */
	u8		flags;		/* packet flags */
	u8		userStatus;	/* app-layer defined status */
	u8		securityIndex;	/* security protocol ID */
	union {
		u16	_rsvd;		/* reserved */
		u16	cksum;		/* security checksum */
	};
	u16		serviceId;	/* service ID */
} __packed;

/*
 * RxRPC socket-buffer private data — overlaid on skb->cb, so it must
 * fit within that array (see the rxrpc_skb() accessor below).
 */
struct rxrpc_skb_priv {
	union {
		u8	nr_jumbo;	/* Number of jumbo subpackets */
	};
	union {
		int	remain;		/* amount of space remaining for next write */
	};

	struct rxrpc_host_header hdr;	/* RxRPC packet header from this packet */
};

#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
199
200
201
202
/*
 * Operations table implemented by an RxRPC security module (e.g. rxkad,
 * or the null rxrpc_no_security fallback).
 */
struct rxrpc_security {
	const char		*name;		/* name of this service */
	u8			security_index;	/* security type provided */

	/* Initialise a security service */
	int (*init)(void);

	/* Clean up a security service */
	void (*exit)(void);

	/* Initialise a connection's security */
	int (*init_connection_security)(struct rxrpc_connection *);

	/* Prime a connection's packet security */
	int (*prime_packet_security)(struct rxrpc_connection *);

	/* Impose security on an outgoing packet */
	int (*secure_packet)(struct rxrpc_call *,
			     struct sk_buff *,
			     size_t,
			     void *);

	/* Verify the security on a received packet */
	int (*verify_packet)(struct rxrpc_call *, struct sk_buff *,
			     unsigned int, unsigned int, rxrpc_seq_t, u16);

	/* Locate the data in a received packet that has been verified */
	void (*locate_data)(struct rxrpc_call *, struct sk_buff *,
			    unsigned int *, unsigned int *);

	/* Issue a challenge */
	int (*issue_challenge)(struct rxrpc_connection *);

	/* Respond to a challenge */
	int (*respond_to_challenge)(struct rxrpc_connection *,
				    struct sk_buff *,
				    u32 *);

	/* Verify a response */
	int (*verify_response)(struct rxrpc_connection *,
			       struct sk_buff *,
			       u32 *);

	/* Clear connection security */
	void (*clear)(struct rxrpc_connection *);
};
249
250
251
252
253
254
/*
 * RxRPC local transport endpoint description — wraps the underlying UDP
 * socket for one local address.
 */
struct rxrpc_local {
	struct rcu_head		rcu;
	atomic_t		usage;		/* reference count */
	struct rxrpc_net	*rxnet;		/* The network ns in which this resides */
	struct list_head	link;		/* link in rxnet->local_endpoints */
	struct socket		*socket;	/* my UDP socket */
	struct work_struct	processor;
	struct rxrpc_sock __rcu	*service;	/* Service(s) listening on this endpoint */
	struct rw_semaphore	defrag_sem;	/* control re-enablement of IP DF bit */
	struct sk_buff_head	reject_queue;	/* packets awaiting rejection */
	struct sk_buff_head	event_queue;	/* endpoint event packets awaiting processing */
	struct rb_root		client_conns;	/* Client connections by socket params */
	spinlock_t		client_conns_lock; /* Lock for client_conns */
	spinlock_t		lock;		/* access lock */
	rwlock_t		services_lock;	/* lock for services list */
	int			debug_id;	/* debug ID for printks */
	bool			dead;
	bool			service_closed;	/* Service socket closed */
	struct sockaddr_rxrpc	srx;		/* local address */
};
275
276
277
278
279
/*
 * RxRPC remote transport endpoint (peer) definition — one per remote
 * address per local endpoint, holding MTU and RTT tracking state.
 */
struct rxrpc_peer {
	struct rcu_head		rcu;
	atomic_t		usage;		/* reference count */
	unsigned long		hash_key;
	struct hlist_node	hash_link;	/* Link in rxnet->peer_hash */
	struct rxrpc_local	*local;
	struct hlist_head	error_targets;	/* targets for net error distribution */
	struct rb_root		service_conns;	/* Service connections */
	struct list_head	keepalive_link;	/* Link in rxnet->peer_keepalive[] */
	time64_t		last_tx_at;	/* Last time packet sent here */
	seqlock_t		service_conn_lock;
	spinlock_t		lock;		/* access lock */
	unsigned int		if_mtu;		/* interface MTU for this peer */
	unsigned int		mtu;		/* network MTU for this peer */
	unsigned int		maxdata;	/* data size maximum for this peer */
	unsigned short		hdrsize;	/* header size (IP + UDP + RxRPC) */
	int			debug_id;	/* debug ID for printks */
	struct sockaddr_rxrpc	srx;		/* remote address */

	/* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32
	spinlock_t		rtt_input_lock;	/* RTT lock for input routine */
	ktime_t			rtt_last_req;	/* Time of last RTT request */
	u64			rtt;		/* Current RTT estimate */
	u64			rtt_sum;	/* Sum of cache contents */
	u64			rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* Determined RTT cache */
	u8			rtt_cursor;	/* next entry at which to insert */
	u8			rtt_usage;	/* amount of cache actually used */

	u8			cong_cwnd;	/* Congestion window size */
};
311
312
313
314
/*
 * Keys for matching a connection: the { epoch, cid } pair doubles as a
 * single 64-bit lookup key via the union.
 */
struct rxrpc_conn_proto {
	union {
		struct {
			u32	epoch;		/* epoch of this connection */
			u32	cid;		/* connection ID */
		};
		u64		index_key;	/* combined key for tree lookup */
	};
};

/*
 * Parameters from which a client connection is made or looked up.
 */
struct rxrpc_conn_parameters {
	struct rxrpc_local	*local;		/* Representation of local endpoint */
	struct rxrpc_peer	*peer;		/* Remote endpoint */
	struct key		*key;		/* Security details */
	bool			exclusive;	/* T if conn is exclusive */
	bool			upgrade;	/* T if service ID can be upgraded */
	u16			service_id;	/* Service ID for this connection */
	u32			security_level;	/* Security level selected */
};
334
335
336
337
/*
 * Bits in the connection flags word.
 */
enum rxrpc_conn_flag {
	RXRPC_CONN_HAS_IDR,		/* Has a client conn ID assigned */
	RXRPC_CONN_IN_SERVICE_CONNS,	/* Conn is in peer->service_conns */
	RXRPC_CONN_IN_CLIENT_CONNS,	/* Conn is in local->client_conns */
	RXRPC_CONN_EXPOSED,		/* Conn has extra ref for exposure */
	RXRPC_CONN_DONT_REUSE,		/* Don't reuse this connection */
	RXRPC_CONN_COUNTED,		/* Counted by rxnet->nr_client_conns */
	RXRPC_CONN_PROBING_FOR_UPGRADE,	/* Probing for service upgrade */
	RXRPC_CONN_FINAL_ACK_0,		/* Need final ACK for channel 0 */
	RXRPC_CONN_FINAL_ACK_1,		/* Need final ACK for channel 1 */
	RXRPC_CONN_FINAL_ACK_2,		/* Need final ACK for channel 2 */
	RXRPC_CONN_FINAL_ACK_3,		/* Need final ACK for channel 3 */
};

/* Mask covering the four per-channel final-ACK flag bits above. */
#define RXRPC_CONN_FINAL_ACK_MASK ((1UL << RXRPC_CONN_FINAL_ACK_0) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_1) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_2) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_3))

/*
 * Events that can be raised upon a connection.
 */
enum rxrpc_conn_event {
	RXRPC_CONN_EV_CHALLENGE,	/* Send challenge packet */
};

/*
 * The connection cache state (client connection management).
 */
enum rxrpc_conn_cache_state {
	RXRPC_CONN_CLIENT_INACTIVE,	/* Conn is not yet listed */
	RXRPC_CONN_CLIENT_WAITING,	/* Conn is on wait list, waiting for capacity */
	RXRPC_CONN_CLIENT_ACTIVE,	/* Conn is on active list, doing calls */
	RXRPC_CONN_CLIENT_UPGRADE,	/* Conn is on active list, probing for upgrade */
	RXRPC_CONN_CLIENT_CULLED,	/* Conn is culled and delisted, doing calls */
	RXRPC_CONN_CLIENT_IDLE,		/* Conn is on idle list, doing mostly nothing */
	RXRPC_CONN__NR_CACHE_STATES
};

/*
 * The connection protocol state.
 */
enum rxrpc_conn_proto_state {
	RXRPC_CONN_UNUSED,		/* Connection not yet attempted */
	RXRPC_CONN_CLIENT,		/* Client connection */
	RXRPC_CONN_SERVICE_PREALLOC,	/* Service connection preallocation */
	RXRPC_CONN_SERVICE_UNSECURED,	/* Service unsecured connection */
	RXRPC_CONN_SERVICE_CHALLENGING,	/* Service challenging for security */
	RXRPC_CONN_SERVICE,		/* Service secured connection */
	RXRPC_CONN_REMOTELY_ABORTED,	/* Conn aborted by peer */
	RXRPC_CONN_LOCALLY_ABORTED,	/* Conn aborted locally */
	RXRPC_CONN__NR_STATES
};
391
392
393
394
395
396
/*
 * RxRPC connection definition
 * - matched by { local, peer, epoch, conn_id, direction }
 * - each connection carries up to RXRPC_MAXCALLS simultaneous calls,
 *   one per channel
 */
struct rxrpc_connection {
	struct rxrpc_conn_proto	proto;
	struct rxrpc_conn_parameters params;

	atomic_t		usage;		/* reference count */
	struct rcu_head		rcu;
	struct list_head	cache_link;

	spinlock_t		channel_lock;
	unsigned char		active_chans;	/* Mask of channels with active calls */
#define RXRPC_ACTIVE_CHANS_MASK	((1 << RXRPC_MAXCALLS) - 1)
	struct list_head	waiting_calls;	/* Calls waiting for channels */
	struct rxrpc_channel {
		unsigned long		final_ack_at;	/* Time at which to issue final ACK */
		struct rxrpc_call __rcu	*call;		/* Active call */
		unsigned int		call_debug_id;	/* call->debug_id */
		u32			call_id;	/* ID of current call */
		u32			call_counter;	/* Call ID counter */
		u32			last_call;	/* ID of last call */
		u8			last_type;	/* Type of last packet */
		union {
			u32		last_seq;
			u32		last_abort;
		};
	} channels[RXRPC_MAXCALLS];

	struct timer_list	timer;		/* Conn event timer */
	struct work_struct	processor;	/* connection event processor */
	union {
		struct rb_node	client_node;	/* Node in local->client_conns */
		struct rb_node	service_node;	/* Node in peer->service_conns */
	};
	struct list_head	proc_link;	/* link in procfs list */
	struct list_head	link;		/* link in master connection list */
	struct sk_buff_head	rx_queue;	/* received conn-level packets */
	const struct rxrpc_security *security;	/* applied security module */
	struct key		*server_key;	/* security for this service */
	struct crypto_sync_skcipher *cipher;	/* encryption handle */
	struct rxrpc_crypt	csum_iv;	/* packet checksum base */
	unsigned long		flags;		/* RXRPC_CONN_* flag bits */
	unsigned long		events;		/* RXRPC_CONN_EV_* event bits */
	unsigned long		idle_timestamp;	/* Time at which last became idle */
	spinlock_t		state_lock;	/* state-change lock */
	enum rxrpc_conn_cache_state cache_state;
	enum rxrpc_conn_proto_state state;	/* current state of connection */
	u32			abort_code;	/* Abort code of connection abort */
	int			debug_id;	/* debug ID for printks */
	atomic_t		serial;		/* packet serial number counter */
	unsigned int		hi_serial;	/* highest serial number received */
	u32			security_nonce;	/* response re-use preventer */
	u32			service_id;	/* Service ID, possibly upgraded */
	u8			size_align;	/* data size alignment (for security) */
	u8			security_size;	/* security header size */
	u8			security_ix;	/* security type */
	u8			out_clientflag;	/* RXRPC_CLIENT_INITIATED if we are client */
	short			error;		/* local error incurred */
};
454
455static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
456{
457 return sp->hdr.flags & RXRPC_CLIENT_INITIATED;
458}
459
460static inline bool rxrpc_to_client(const struct rxrpc_skb_priv *sp)
461{
462 return !rxrpc_to_server(sp);
463}
464
465
466
467
/*
 * Flags in call->flags.
 */
enum rxrpc_call_flag {
	RXRPC_CALL_RELEASED,		/* call has been released - no more messages to userspace */
	RXRPC_CALL_HAS_USERID,		/* has a user ID attached */
	RXRPC_CALL_IS_SERVICE,		/* Call is service call */
	RXRPC_CALL_EXPOSED,		/* The call was exposed to the world */
	RXRPC_CALL_RX_LAST,		/* Received the last packet (at rxtx_top) */
	RXRPC_CALL_TX_LAST,		/* Last packet in Tx buffer (at rxtx_top) */
	RXRPC_CALL_SEND_PING,		/* A ping will need to be sent */
	RXRPC_CALL_PINGING,		/* A ping in process */
	RXRPC_CALL_RETRANS_TIMEOUT,	/* Retransmission due to timeout occurred */
	RXRPC_CALL_BEGAN_RX_TIMER,	/* We began the expect_rx_by timer */
	RXRPC_CALL_RX_HEARD,		/* The peer responded at least once to this call */
	RXRPC_CALL_RX_UNDERRUN,		/* Got data underrun */
	RXRPC_CALL_IS_INTR,		/* The call is interruptible */
};

/*
 * Events that can be raised on a call.
 */
enum rxrpc_call_event {
	RXRPC_CALL_EV_ACK,		/* need to generate ACK */
	RXRPC_CALL_EV_ABORT,		/* need to generate abort */
	RXRPC_CALL_EV_RESEND,		/* Tx resend required */
	RXRPC_CALL_EV_PING,		/* Ping send required */
	RXRPC_CALL_EV_EXPIRED,		/* Expiry occurred */
	RXRPC_CALL_EV_ACK_LOST,		/* ACK may be lost, send ping */
};

/*
 * The states that a call can be in.
 */
enum rxrpc_call_state {
	RXRPC_CALL_UNINITIALISED,
	RXRPC_CALL_CLIENT_AWAIT_CONN,	/* - client waiting for connection to become available */
	RXRPC_CALL_CLIENT_SEND_REQUEST,	/* - client sending request phase */
	RXRPC_CALL_CLIENT_AWAIT_REPLY,	/* - client awaiting reply */
	RXRPC_CALL_CLIENT_RECV_REPLY,	/* - client receiving reply phase */
	RXRPC_CALL_SERVER_PREALLOC,	/* - service preallocation */
	RXRPC_CALL_SERVER_SECURING,	/* - server securing request connection */
	RXRPC_CALL_SERVER_ACCEPTING,	/* - server accepting request */
	RXRPC_CALL_SERVER_RECV_REQUEST,	/* - server receiving request */
	RXRPC_CALL_SERVER_ACK_REQUEST,	/* - server pending ACK of request */
	RXRPC_CALL_SERVER_SEND_REPLY,	/* - server sending reply */
	RXRPC_CALL_SERVER_AWAIT_ACK,	/* - server awaiting final ACK */
	RXRPC_CALL_COMPLETE,		/* - call complete */
	NR__RXRPC_CALL_STATES
};

/*
 * Call completion condition (once state == RXRPC_CALL_COMPLETE).
 */
enum rxrpc_call_completion {
	RXRPC_CALL_SUCCEEDED,		/* - Normal termination */
	RXRPC_CALL_REMOTELY_ABORTED,	/* - call aborted by peer */
	RXRPC_CALL_LOCALLY_ABORTED,	/* - call aborted locally on error or close */
	RXRPC_CALL_LOCAL_ERROR,		/* - call failed due to local error */
	RXRPC_CALL_NETWORK_ERROR,	/* - call terminated by network error */
	NR__RXRPC_CALL_COMPLETIONS
};

/*
 * Call Tx congestion management modes.
 */
enum rxrpc_congest_mode {
	RXRPC_CALL_SLOW_START,
	RXRPC_CALL_CONGEST_AVOIDANCE,
	RXRPC_CALL_PACKET_LOSS,
	RXRPC_CALL_FAST_RETRANSMIT,
	NR__RXRPC_CONGEST_MODES
};
538
539
540
541
542
/*
 * RxRPC call definition
 * - matched by { connection, call_id }
 */
struct rxrpc_call {
	struct rcu_head		rcu;
	struct rxrpc_connection	*conn;		/* connection carrying call */
	struct rxrpc_peer	*peer;		/* Peer record for remote address */
	struct rxrpc_sock __rcu	*socket;	/* socket responsible */
	struct rxrpc_net	*rxnet;		/* Network namespace to which call belongs */
	struct mutex		user_mutex;	/* User access mutex */
	unsigned long		ack_at;		/* When deferred ACK needs to happen */
	unsigned long		ack_lost_at;	/* When ACK is figured as lost */
	unsigned long		resend_at;	/* When next resend needs to happen */
	unsigned long		ping_at;	/* When next to send a ping */
	unsigned long		keepalive_at;	/* When next to send a keepalive ping */
	unsigned long		expect_rx_by;	/* When we expect to get a packet by */
	unsigned long		expect_req_by;	/* When we expect to get a request DATA packet by */
	unsigned long		expect_term_by;	/* When we expect call termination by */
	u32			next_rx_timo;	/* Timeout for next Rx packet (jiffies) */
	u32			next_req_timo;	/* Timeout for next Rx request packet (jiffies) */
	struct timer_list	timer;		/* Combined event timer */
	struct work_struct	processor;	/* Event processor */
	rxrpc_notify_rx_t	notify_rx;	/* kernel service Rx notification function */
	struct list_head	link;		/* link in master call list */
	struct list_head	chan_wait_link;	/* Link in conn->waiting_calls */
	struct hlist_node	error_link;	/* link in error distribution list */
	struct list_head	accept_link;	/* Link in rx->to_be_accepted */
	struct list_head	recvmsg_link;	/* Link in rx->recvmsg_q */
	struct list_head	sock_link;	/* Link in rx->sock_calls */
	struct rb_node		sock_node;	/* Node in rx->calls */
	struct sk_buff		*tx_pending;	/* Tx socket buffer being filled */
	wait_queue_head_t	waitq;		/* Wait queue for channel or Tx */
	s64			tx_total_len;	/* Total length left to be transmitted (or -1) */
	__be32			crypto_buf[2];	/* Temporary packet crypto buffer */
	unsigned long		user_call_ID;	/* user-defined call ID */
	unsigned long		flags;		/* RXRPC_CALL_* flag bits */
	unsigned long		events;		/* RXRPC_CALL_EV_* event bits */
	spinlock_t		lock;
	spinlock_t		notify_lock;	/* Kernel notification lock */
	rwlock_t		state_lock;	/* lock for state transition */
	u32			abort_code;	/* Local/remote abort code */
	int			error;		/* Local error incurred */
	enum rxrpc_call_state	state;		/* current state of call */
	enum rxrpc_call_completion completion;	/* Call completion condition */
	atomic_t		usage;		/* reference count */
	u16			service_id;	/* service ID */
	u8			security_ix;	/* Security type */
	u32			call_id;	/* call ID on connection */
	u32			cid;		/* connection ID plus channel index */
	int			debug_id;	/* debug ID for printks */
	unsigned short		rx_pkt_offset;	/* Current recvmsg packet offset */
	unsigned short		rx_pkt_len;	/* Current recvmsg packet len */

	/* Rx/Tx circular buffer, depending on phase.
	 *
	 * In the Rx phase, packets are annotated with RXRPC_RX_ANNO_*.
	 * In the Tx phase, packets are annotated with RXRPC_TX_ANNO_*.
	 */
#define RXRPC_RXTX_BUFF_SIZE	64
#define RXRPC_RXTX_BUFF_MASK	(RXRPC_RXTX_BUFF_SIZE - 1)
#define RXRPC_INIT_RX_WINDOW_SIZE 63
	struct sk_buff		**rxtx_buffer;
	u8			*rxtx_annotations;
#define RXRPC_TX_ANNO_ACK	0
#define RXRPC_TX_ANNO_UNACK	1
#define RXRPC_TX_ANNO_NAK	2
#define RXRPC_TX_ANNO_RETRANS	3
#define RXRPC_TX_ANNO_MASK	0x03
#define RXRPC_TX_ANNO_LAST	0x04
#define RXRPC_TX_ANNO_RESENT	0x08

#define RXRPC_RX_ANNO_JUMBO	0x3f	/* Jumbo subpacket number + 1 if not zero */
#define RXRPC_RX_ANNO_JLAST	0x40	/* Set if last element of a jumbo packet */
#define RXRPC_RX_ANNO_VERIFIED	0x80	/* Set if verified and decrypted */
	rxrpc_seq_t		tx_hard_ack;	/* Dead slot in buffer; the first transmitted
						 * but not hard-ACK'd packet follows this
						 */
	rxrpc_seq_t		tx_top;		/* Highest Tx slot allocated */
	u16			tx_backoff;	/* Delay to insert due to Tx failure */

	/* TCP-style slow-start congestion control.  Tracked in terms of
	 * segments (DATA packets) rather than bytes since the SMSS is fixed.
	 */
#define RXRPC_TX_SMSS		RXRPC_JUMBO_DATALEN
	u8			cong_cwnd;	/* Congestion window size */
	u8			cong_extra;	/* Extra to send for congestion management */
	u8			cong_ssthresh;	/* Slow-start threshold */
	enum rxrpc_congest_mode	cong_mode:8;	/* Congestion management mode */
	u8			cong_dup_acks;	/* Count of ACKs showing missing packets */
	u8			cong_cumul_acks; /* Cumulative ACK count */
	ktime_t			cong_tstamp;	/* Last time cwnd was changed */

	rxrpc_seq_t		rx_hard_ack;	/* Dead slot in buffer; the first received
						 * but not consumed packet follows this
						 */
	rxrpc_seq_t		rx_top;		/* Highest Rx slot allocated */
	rxrpc_seq_t		rx_expect_next;	/* Expected next packet sequence number */
	rxrpc_serial_t		rx_serial;	/* Highest serial received for this call */
	u8			rx_winsize;	/* Size of Rx window */
	u8			tx_winsize;	/* Maximum size of Tx window */
	bool			tx_phase;	/* T if transmission phase, F if receive phase */
	u8			nr_jumbo_bad;	/* Number of jumbo duplicates/exceeds-windows */

	spinlock_t		input_lock;	/* Lock for packet input to this call */

	/* Receive-phase ACK management */
	u8			ackr_reason;	/* reason to ACK */
	u16			ackr_skew;	/* skew on packet being ACK'd */
	rxrpc_serial_t		ackr_serial;	/* serial of packet being ACK'd */
	rxrpc_serial_t		ackr_first_seq;	/* first sequence number received */
	rxrpc_seq_t		ackr_prev_seq;	/* previous sequence number received */
	rxrpc_seq_t		ackr_consumed;	/* Highest packet shown consumed */
	rxrpc_seq_t		ackr_seen;	/* Highest packet shown seen */

	/* Ping management */
	rxrpc_serial_t		ping_serial;	/* Last ping sent */
	ktime_t			ping_time;	/* Time last ping sent */

	/* Transmission-phase ACK management */
	ktime_t			acks_latest_ts;	/* Timestamp of latest ACK received */
	rxrpc_serial_t		acks_latest;	/* serial number of latest ACK received */
	rxrpc_seq_t		acks_lowest_nak; /* Lowest NACK in the buffer */
	rxrpc_seq_t		acks_lost_top;	/* tx_top at the time lost-ack ping sent */
	rxrpc_serial_t		acks_lost_ping;	/* Serial number of probe ACK */
};
670
671
672
673
/*
 * Summary of a new ACK and the changes it caused to the Tx buffer state.
 */
struct rxrpc_ack_summary {
	u8			ack_reason;
	u8			nr_acks;		/* Number of ACKs in packet */
	u8			nr_nacks;		/* Number of NACKs in packet */
	u8			nr_new_acks;		/* Number of new ACKs in packet */
	u8			nr_new_nacks;		/* Number of new NACKs in packet */
	u8			nr_rot_new_acks;	/* Number of rotated new ACKs */
	bool			new_low_nack;		/* T if new low NACK found */
	bool			retrans_timeo;		/* T if reTx due to timeout happened */
	u8			flight_size;		/* Number of unreceived transmissions */
	/* Place to stash values for tracing */
	enum rxrpc_congest_mode	mode:8;
	u8			cwnd;
	u8			ssthresh;
	u8			dup_acks;
	u8			cumulative_acks;
};

/*
 * Operation requested of sendmsg() via control messages.
 */
enum rxrpc_command {
	RXRPC_CMD_SEND_DATA,		/* send data message */
	RXRPC_CMD_SEND_ABORT,		/* request abort generation */
	RXRPC_CMD_ACCEPT,		/* [server] accept incoming call */
	RXRPC_CMD_REJECT_BUSY,		/* [server] reject a call as busy */
};

/* Parameters applied to a call at creation time. */
struct rxrpc_call_params {
	s64			tx_total_len;	/* Total Tx data length (if send data) */
	unsigned long		user_call_ID;	/* User's call ID */
	struct {
		u32		hard;		/* Maximum lifetime (sec) */
		u32		idle;		/* Max time since last data packet (msec) */
		u32		normal;		/* Max time since last call packet (msec) */
	} timeouts;
	u8			nr_timeouts;	/* Number of timeouts specified */
	bool			intr;		/* The call is interruptible */
};

/* Everything sendmsg() decodes from the supplied control messages. */
struct rxrpc_send_params {
	struct rxrpc_call_params call;
	u32			abort_code;	/* Abort code to Tx (if abort) */
	enum rxrpc_command	command : 8;	/* The command to implement */
	bool			exclusive;	/* Shared or exclusive call */
	bool			upgrade;	/* If the connection is upgradeable */
};
721
722#include <trace/events/rxrpc.h>
723
724
725
726
727extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
728extern struct workqueue_struct *rxrpc_workqueue;
729
730
731
732
733int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
734void rxrpc_discard_prealloc(struct rxrpc_sock *);
735struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
736 struct rxrpc_sock *,
737 struct sk_buff *);
738void rxrpc_accept_incoming_calls(struct rxrpc_local *);
739struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
740 rxrpc_notify_rx_t);
741int rxrpc_reject_call(struct rxrpc_sock *);
742
743
744
745
746void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
747 enum rxrpc_propose_ack_trace);
748void rxrpc_process_call(struct work_struct *);
749
/*
 * Bring a call's combined event timer forward to @expire_at, emitting a
 * trace event first.  timer_reduce() only ever shortens the timeout, so
 * a later existing expiry is preserved.
 */
static inline void rxrpc_reduce_call_timer(struct rxrpc_call *call,
					   unsigned long expire_at,
					   unsigned long now,
					   enum rxrpc_timer_trace why)
{
	trace_rxrpc_timer(call, why, now);
	timer_reduce(&call->timer, expire_at);
}
758
759
760
761
762extern const char *const rxrpc_call_states[];
763extern const char *const rxrpc_call_completions[];
764extern unsigned int rxrpc_max_call_lifetime;
765extern struct kmem_cache *rxrpc_call_jar;
766
767struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
768struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t, unsigned int);
769struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
770 struct rxrpc_conn_parameters *,
771 struct sockaddr_rxrpc *,
772 struct rxrpc_call_params *, gfp_t,
773 unsigned int);
774void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
775 struct sk_buff *);
776void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
777void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
778bool __rxrpc_queue_call(struct rxrpc_call *);
779bool rxrpc_queue_call(struct rxrpc_call *);
780void rxrpc_see_call(struct rxrpc_call *);
781void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
782void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
783void rxrpc_cleanup_call(struct rxrpc_call *);
784void rxrpc_destroy_all_calls(struct rxrpc_net *);
785
786static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
787{
788 return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags);
789}
790
791static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
792{
793 return !rxrpc_is_service_call(call);
794}
795
796
797
798
799static inline bool __rxrpc_set_call_completion(struct rxrpc_call *call,
800 enum rxrpc_call_completion compl,
801 u32 abort_code,
802 int error)
803{
804 if (call->state < RXRPC_CALL_COMPLETE) {
805 call->abort_code = abort_code;
806 call->error = error;
807 call->completion = compl,
808 call->state = RXRPC_CALL_COMPLETE;
809 trace_rxrpc_call_complete(call);
810 wake_up(&call->waitq);
811 return true;
812 }
813 return false;
814}
815
816static inline bool rxrpc_set_call_completion(struct rxrpc_call *call,
817 enum rxrpc_call_completion compl,
818 u32 abort_code,
819 int error)
820{
821 bool ret;
822
823 write_lock_bh(&call->state_lock);
824 ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
825 write_unlock_bh(&call->state_lock);
826 return ret;
827}
828
829
830
831
832static inline bool __rxrpc_call_completed(struct rxrpc_call *call)
833{
834 return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
835}
836
837static inline bool rxrpc_call_completed(struct rxrpc_call *call)
838{
839 bool ret;
840
841 write_lock_bh(&call->state_lock);
842 ret = __rxrpc_call_completed(call);
843 write_unlock_bh(&call->state_lock);
844 return ret;
845}
846
847
848
849
850static inline bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
851 rxrpc_seq_t seq,
852 u32 abort_code, int error)
853{
854 trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq,
855 abort_code, error);
856 return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
857 abort_code, error);
858}
859
860static inline bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
861 rxrpc_seq_t seq, u32 abort_code, int error)
862{
863 bool ret;
864
865 write_lock_bh(&call->state_lock);
866 ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
867 write_unlock_bh(&call->state_lock);
868 return ret;
869}
870
871
872
873
/*
 * Abort a call due to a protocol error in a received packet, tracing
 * the offending packet's serial number first.  Completes the call via
 * rxrpc_abort_call() with error -EPROTO.
 */
static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
					struct sk_buff *skb,
					const char *eproto_why,
					const char *why,
					u32 abort_code)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	trace_rxrpc_rx_eproto(call, sp->hdr.serial, eproto_why);
	return rxrpc_abort_call(why, call, sp->hdr.seq, abort_code, -EPROTO);
}

/* Wrapper interning the eproto reason string for the tracepoint. */
#define rxrpc_abort_eproto(call, skb, eproto_why, abort_why, abort_code) \
	__rxrpc_abort_eproto((call), (skb), tracepoint_string(eproto_why), \
			     (abort_why), (abort_code))
889
890
891
892
893extern unsigned int rxrpc_max_client_connections;
894extern unsigned int rxrpc_reap_client_connections;
895extern unsigned long rxrpc_conn_idle_client_expiry;
896extern unsigned long rxrpc_conn_idle_client_fast_expiry;
897extern struct idr rxrpc_client_conn_ids;
898
899void rxrpc_destroy_client_conn_ids(void);
900int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *,
901 struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *,
902 gfp_t);
903void rxrpc_expose_client_call(struct rxrpc_call *);
904void rxrpc_disconnect_client_call(struct rxrpc_call *);
905void rxrpc_put_client_conn(struct rxrpc_connection *);
906void rxrpc_discard_expired_client_conns(struct work_struct *);
907void rxrpc_destroy_all_client_connections(struct rxrpc_net *);
908
909
910
911
912void rxrpc_process_connection(struct work_struct *);
913
914
915
916
917extern unsigned int rxrpc_connection_expiry;
918extern unsigned int rxrpc_closed_conn_expiry;
919
920struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
921struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
922 struct sk_buff *,
923 struct rxrpc_peer **);
924void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
925void rxrpc_disconnect_call(struct rxrpc_call *);
926void rxrpc_kill_connection(struct rxrpc_connection *);
927bool rxrpc_queue_conn(struct rxrpc_connection *);
928void rxrpc_see_connection(struct rxrpc_connection *);
929void rxrpc_get_connection(struct rxrpc_connection *);
930struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *);
931void rxrpc_put_service_conn(struct rxrpc_connection *);
932void rxrpc_service_connection_reaper(struct work_struct *);
933void rxrpc_destroy_all_connections(struct rxrpc_net *);
934
935static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
936{
937 return conn->out_clientflag;
938}
939
940static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
941{
942 return !rxrpc_conn_is_client(conn);
943}
944
945static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
946{
947 if (!conn)
948 return;
949
950 if (rxrpc_conn_is_client(conn))
951 rxrpc_put_client_conn(conn);
952 else
953 rxrpc_put_service_conn(conn);
954}
955
956static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn,
957 unsigned long expire_at)
958{
959 timer_reduce(&conn->timer, expire_at);
960}
961
962
963
964
965struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
966 struct sk_buff *);
967struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t);
968void rxrpc_new_incoming_connection(struct rxrpc_sock *,
969 struct rxrpc_connection *, struct sk_buff *);
970void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
971
972
973
974
975int rxrpc_input_packet(struct sock *, struct sk_buff *);
976
977
978
979
980extern const struct rxrpc_security rxrpc_no_security;
981
982
983
984
985extern struct key_type key_type_rxrpc;
986extern struct key_type key_type_rxrpc_s;
987
988int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
989int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
990int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time64_t,
991 u32);
992
993
994
995
996extern void rxrpc_process_local_events(struct rxrpc_local *);
997
998
999
1000
1001struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *);
1002struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
1003struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
1004void rxrpc_put_local(struct rxrpc_local *);
1005void rxrpc_queue_local(struct rxrpc_local *);
1006void rxrpc_destroy_all_locals(struct rxrpc_net *);
1007
1008
1009
1010
1011extern unsigned int rxrpc_max_backlog __read_mostly;
1012extern unsigned long rxrpc_requested_ack_delay;
1013extern unsigned long rxrpc_soft_ack_delay;
1014extern unsigned long rxrpc_idle_ack_delay;
1015extern unsigned int rxrpc_rx_window_size;
1016extern unsigned int rxrpc_rx_mtu;
1017extern unsigned int rxrpc_rx_jumbo_max;
1018extern unsigned long rxrpc_resend_timeout;
1019
1020extern const s8 rxrpc_ack_priority[];
1021
1022
1023
1024
1025extern unsigned int rxrpc_net_id;
1026extern struct pernet_operations rxrpc_net_ops;
1027
/* Obtain the rxrpc per-netns record for a given network namespace. */
static inline struct rxrpc_net *rxrpc_net(struct net *net)
{
	return net_generic(net, rxrpc_net_id);
}
1032
1033
1034
1035
1036int rxrpc_send_ack_packet(struct rxrpc_call *, bool, rxrpc_serial_t *);
1037int rxrpc_send_abort_packet(struct rxrpc_call *);
1038int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
1039void rxrpc_reject_packets(struct rxrpc_local *);
1040void rxrpc_send_keepalive(struct rxrpc_peer *);
1041
1042
1043
1044
1045void rxrpc_error_report(struct sock *);
1046void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
1047 rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
1048void rxrpc_peer_keepalive_worker(struct work_struct *);
1049
1050
1051
1052
1053struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
1054 const struct sockaddr_rxrpc *);
1055struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *, struct rxrpc_local *,
1056 struct sockaddr_rxrpc *, gfp_t);
1057struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
1058void rxrpc_new_incoming_peer(struct rxrpc_sock *, struct rxrpc_local *,
1059 struct rxrpc_peer *);
1060void rxrpc_destroy_all_peers(struct rxrpc_net *);
1061struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
1062struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
1063void rxrpc_put_peer(struct rxrpc_peer *);
1064
1065
1066
1067
1068extern const struct seq_operations rxrpc_call_seq_ops;
1069extern const struct seq_operations rxrpc_connection_seq_ops;
1070extern const struct seq_operations rxrpc_peer_seq_ops;
1071
1072
1073
1074
1075void rxrpc_notify_socket(struct rxrpc_call *);
1076int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);
1077
1078
1079
1080
1081#ifdef CONFIG_RXKAD
1082extern const struct rxrpc_security rxkad;
1083#endif
1084
1085
1086
1087
1088int __init rxrpc_init_security(void);
1089void rxrpc_exit_security(void);
1090int rxrpc_init_client_conn_security(struct rxrpc_connection *);
1091int rxrpc_init_server_conn_security(struct rxrpc_connection *);
1092
1093
1094
1095
1096int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);
1097
1098
1099
1100
1101void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
1102void rxrpc_packet_destructor(struct sk_buff *);
1103void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
1104void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
1105void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
1106void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
1107void rxrpc_purge_queue(struct sk_buff_head *);
1108
1109
1110
1111
1112#ifdef CONFIG_SYSCTL
1113extern int __init rxrpc_sysctl_init(void);
1114extern void rxrpc_sysctl_exit(void);
1115#else
1116static inline int __init rxrpc_sysctl_init(void) { return 0; }
1117static inline void rxrpc_sysctl_exit(void) {}
1118#endif
1119
1120
1121
1122
1123int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);
1124
1125static inline bool before(u32 seq1, u32 seq2)
1126{
1127 return (s32)(seq1 - seq2) < 0;
1128}
1129static inline bool before_eq(u32 seq1, u32 seq2)
1130{
1131 return (s32)(seq1 - seq2) <= 0;
1132}
1133static inline bool after(u32 seq1, u32 seq2)
1134{
1135 return (s32)(seq1 - seq2) > 0;
1136}
1137static inline bool after_eq(u32 seq1, u32 seq2)
1138{
1139 return (s32)(seq1 - seq2) >= 0;
1140}
1141
1142
1143
1144
/* Debug mask consulted by the _enter/_leave/_debug macros below. */
extern unsigned int rxrpc_debug;

/*
 * Debug printing: each macro prefixes the message with the current
 * task's comm name and a category marker.
 */
#define dbgprintk(FMT,...) \
	printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)

#define kenter(FMT,...)	dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define kleave(FMT,...)	dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define kdebug(FMT,...)	dbgprintk(" "FMT ,##__VA_ARGS__)
#define kproto(FMT,...)	dbgprintk("### "FMT ,##__VA_ARGS__)
#define knet(FMT,...)	dbgprintk("@@@ "FMT ,##__VA_ARGS__)
1155
1156
1157#if defined(__KDEBUG)
1158#define _enter(FMT,...) kenter(FMT,##__VA_ARGS__)
1159#define _leave(FMT,...) kleave(FMT,##__VA_ARGS__)
1160#define _debug(FMT,...) kdebug(FMT,##__VA_ARGS__)
1161#define _proto(FMT,...) kproto(FMT,##__VA_ARGS__)
1162#define _net(FMT,...) knet(FMT,##__VA_ARGS__)
1163
1164#elif defined(CONFIG_AF_RXRPC_DEBUG)
1165#define RXRPC_DEBUG_KENTER 0x01
1166#define RXRPC_DEBUG_KLEAVE 0x02
1167#define RXRPC_DEBUG_KDEBUG 0x04
1168#define RXRPC_DEBUG_KPROTO 0x08
1169#define RXRPC_DEBUG_KNET 0x10
1170
1171#define _enter(FMT,...) \
1172do { \
1173 if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER)) \
1174 kenter(FMT,##__VA_ARGS__); \
1175} while (0)
1176
1177#define _leave(FMT,...) \
1178do { \
1179 if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE)) \
1180 kleave(FMT,##__VA_ARGS__); \
1181} while (0)
1182
1183#define _debug(FMT,...) \
1184do { \
1185 if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG)) \
1186 kdebug(FMT,##__VA_ARGS__); \
1187} while (0)
1188
1189#define _proto(FMT,...) \
1190do { \
1191 if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO)) \
1192 kproto(FMT,##__VA_ARGS__); \
1193} while (0)
1194
1195#define _net(FMT,...) \
1196do { \
1197 if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET)) \
1198 knet(FMT,##__VA_ARGS__); \
1199} while (0)
1200
1201#else
1202#define _enter(FMT,...) no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
1203#define _leave(FMT,...) no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
1204#define _debug(FMT,...) no_printk(" "FMT ,##__VA_ARGS__)
1205#define _proto(FMT,...) no_printk("### "FMT ,##__VA_ARGS__)
1206#define _net(FMT,...) no_printk("@@@ "FMT ,##__VA_ARGS__)
1207#endif
1208
1209
1210
1211
1212#if 1
1213
1214#define ASSERT(X) \
1215do { \
1216 if (unlikely(!(X))) { \
1217 pr_err("Assertion failed\n"); \
1218 BUG(); \
1219 } \
1220} while (0)
1221
1222#define ASSERTCMP(X, OP, Y) \
1223do { \
1224 __typeof__(X) _x = (X); \
1225 __typeof__(Y) _y = (__typeof__(X))(Y); \
1226 if (unlikely(!(_x OP _y))) { \
1227 pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
1228 (unsigned long)_x, (unsigned long)_x, #OP, \
1229 (unsigned long)_y, (unsigned long)_y); \
1230 BUG(); \
1231 } \
1232} while (0)
1233
1234#define ASSERTIF(C, X) \
1235do { \
1236 if (unlikely((C) && !(X))) { \
1237 pr_err("Assertion failed\n"); \
1238 BUG(); \
1239 } \
1240} while (0)
1241
1242#define ASSERTIFCMP(C, X, OP, Y) \
1243do { \
1244 __typeof__(X) _x = (X); \
1245 __typeof__(Y) _y = (__typeof__(X))(Y); \
1246 if (unlikely((C) && !(_x OP _y))) { \
1247 pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
1248 (unsigned long)_x, (unsigned long)_x, #OP, \
1249 (unsigned long)_y, (unsigned long)_y); \
1250 BUG(); \
1251 } \
1252} while (0)
1253
1254#else
1255
1256#define ASSERT(X) \
1257do { \
1258} while (0)
1259
1260#define ASSERTCMP(X, OP, Y) \
1261do { \
1262} while (0)
1263
1264#define ASSERTIF(C, X) \
1265do { \
1266} while (0)
1267
1268#define ASSERTIFCMP(C, X, OP, Y) \
1269do { \
1270} while (0)
1271
1272#endif
1273