1
2
3
4
5
6
7
8#include <linux/atomic.h>
9#include <linux/seqlock.h>
10#include <net/net_namespace.h>
11#include <net/netns/generic.h>
12#include <net/sock.h>
13#include <net/af_rxrpc.h>
14#include "protocol.h"
15
/*
 * Debug-only check that an atomic counter doesn't look like freed-slab
 * poison.  Compiled out (#if 0); the stub variant is what builds today.
 */
#if 0
#define CHECK_SLAB_OKAY(X) \
	BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
	       (POISON_FREE << 8 | POISON_FREE))
#else
#define CHECK_SLAB_OKAY(X) do {} while (0)
#endif
23
#define FCRYPT_BSIZE 8			/* fcrypt block size in bytes */

/*
 * An fcrypt-sized block, addressable either as raw bytes or as two
 * big-endian 32-bit words; 8-byte aligned for the crypto layer.
 */
struct rxrpc_crypt {
	union {
		u8	x[FCRYPT_BSIZE];
		__be32	n[2];
	};
} __attribute__((aligned(8)));
31
/* Queue work items on the rxrpc-private workqueue. */
#define rxrpc_queue_work(WS)	queue_work(rxrpc_workqueue, (WS))
#define rxrpc_queue_delayed_work(WS,D)	\
	queue_delayed_work(rxrpc_workqueue, (WS), (D))
35
struct rxrpc_connection;

/*
 * Mark applied to socket buffers to say how they should be handled
 * (e.g. by the rejection path).
 */
enum rxrpc_skb_mark {
	RXRPC_SKB_MARK_REJECT_BUSY,	/* Reject with BUSY */
	RXRPC_SKB_MARK_REJECT_ABORT,	/* Reject with ABORT */
};
46
47
48
49
/*
 * sk_state for RxRPC sockets.
 */
enum {
	RXRPC_UNBOUND = 0,
	RXRPC_CLIENT_UNBOUND,		/* Unbound socket used as client */
	RXRPC_CLIENT_BOUND,		/* client local address bound */
	RXRPC_SERVER_BOUND,		/* server local address bound */
	RXRPC_SERVER_BOUND2,		/* second server local address bound */
	RXRPC_SERVER_LISTENING,		/* server listening for connections */
	RXRPC_SERVER_LISTEN_DISABLED,	/* server listening disabled */
	RXRPC_CLOSE,			/* socket is being closed */
};
60
61
62
63
/*
 * Per-network-namespace state for the rxrpc subsystem.
 */
struct rxrpc_net {
	struct proc_dir_entry	*proc_net;	/* subdir in /proc/net */
	u32			epoch;		/* local epoch for detecting local-end reset */
	struct list_head	calls;		/* list of calls active in this namespace */
	rwlock_t		call_lock;	/* lock for ->calls */
	atomic_t		nr_calls;	/* count of allocated calls */

	atomic_t		nr_conns;	/* count of allocated connections */
	struct list_head	conn_proc_list;	/* list of conns in this namespace for proc */
	struct list_head	service_conns;	/* service conns in this namespace */
	rwlock_t		conn_lock;	/* lock for ->conn_proc_list, ->service_conns */
	struct work_struct	service_conn_reaper;
	struct timer_list	service_conn_reap_timer;

	unsigned int		nr_client_conns;
	unsigned int		nr_active_client_conns;
	bool			kill_all_client_conns;
	bool			live;		/* namespace still alive (cleared on exit) */
	spinlock_t		client_conn_cache_lock;	/* lock for ->*_client_conns */
	spinlock_t		client_conn_discard_lock;	/* prevent multiple discarders */
	struct list_head	waiting_client_conns;
	struct list_head	active_client_conns;
	struct list_head	idle_client_conns;
	struct work_struct	client_conn_reaper;
	struct timer_list	client_conn_reap_timer;

	struct list_head	local_endpoints;
	struct mutex		local_mutex;	/* lock for ->local_endpoints */

	DECLARE_HASHTABLE	(peer_hash, 10);
	spinlock_t		peer_hash_lock;	/* lock for ->peer_hash */

#define RXRPC_KEEPALIVE_TIME 20	/* NAT keepalive time in seconds */
	u8			peer_keepalive_cursor;
	time64_t		peer_keepalive_base;
	struct list_head	peer_keepalive[32];	/* keepalive time buckets */
	struct list_head	peer_keepalive_new;
	struct timer_list	peer_keepalive_timer;
	struct work_struct	peer_keepalive_work;
};
104
105
106
107
108
109
110
111
112
/*
 * Service backlog buffer: preallocated peers, connections and calls ready
 * for incoming service calls, managed as head/tail ring buffers.
 */
struct rxrpc_backlog {
	unsigned short		peer_backlog_head;
	unsigned short		peer_backlog_tail;
	unsigned short		conn_backlog_head;
	unsigned short		conn_backlog_tail;
	unsigned short		call_backlog_head;
	unsigned short		call_backlog_tail;
#define RXRPC_BACKLOG_MAX	32	/* ring capacity (power of two) */
	struct rxrpc_peer	*peer_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_connection	*conn_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_call	*call_backlog[RXRPC_BACKLOG_MAX];
};
125
126
127
128
/*
 * RxRPC socket definition.
 */
struct rxrpc_sock {
	/* WARNING: sk has to be the first member */
	struct sock		sk;
	rxrpc_notify_new_call_t	notify_new_call;	/* Func to notify of new call */
	rxrpc_discard_new_call_t discard_new_call;	/* Func to discard a new call */
	struct rxrpc_local	*local;		/* local endpoint */
	struct rxrpc_backlog	*backlog;	/* preallocation for services */
	spinlock_t		incoming_lock;	/* incoming call vs service shutdown lock */
	struct list_head	sock_calls;	/* list of calls owned by this socket */
	struct list_head	to_be_accepted;	/* calls awaiting acceptance */
	struct list_head	recvmsg_q;	/* calls awaiting recvmsg processing */
	rwlock_t		recvmsg_lock;	/* lock for ->recvmsg_q */
	struct key		*key;		/* security for this socket */
	struct key		*securities;	/* list of server security descriptors */
	struct rb_root		calls;		/* user_call_ID -> call mapping */
	unsigned long		flags;
#define RXRPC_SOCK_CONNECTED		0	/* ->connect_srx is set */
	rwlock_t		call_lock;	/* lock for ->calls */
	u32			min_sec_level;	/* minimum security level */
#define RXRPC_SECURITY_MAX	RXRPC_SECURITY_ENCRYPT
	bool			exclusive;	/* exclusive connection for a client socket */
	u16			second_service;	/* additional service bound to srx */
	struct {
		/* service upgrade mapping */
		u16		from;		/* service ID to upgrade */
		u16		to;		/* service ID to upgrade to */
	} service_upgrade;
	sa_family_t		family;		/* protocol family created with */
	struct sockaddr_rxrpc	srx;		/* local address */
	struct sockaddr_rxrpc	connect_srx;	/* default connect address (tentative) */
};

#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)
162
163
164
165
/*
 * CPU-byte-order version of an RxRPC packet header.  __packed so it can be
 * memcpy'd as a unit; field order mirrors the wire header.
 */
struct rxrpc_host_header {
	u32		epoch;		/* client boot timestamp */
	u32		cid;		/* connection and channel ID */
	u32		callNumber;	/* call ID (0 for connection-level packets) */
	u32		seq;		/* sequence number of packet in call */
	u32		serial;		/* serial number of packet sent */
	u8		type;		/* packet type */
	u8		flags;		/* packet flags */
	u8		userStatus;	/* app-layer defined status */
	u8		securityIndex;	/* security protocol ID */
	union {
		u16	_rsvd;		/* reserved */
		u16	cksum;		/* kerberos security checksum */
	};
	u16		serviceId;	/* service ID */
} __packed;
182
183
184
185
186
/*
 * RxRPC socket buffer private data, stored in skb->cb.
 */
struct rxrpc_skb_priv {
	atomic_t	nr_ring_pins;	/* number of rxtx-ring pins */
	u8		nr_subpackets;	/* number of subpackets in this skb */
	u8		rx_flags;	/* receive-phase flags */
#define RXRPC_SKB_INCL_LAST	0x01	/* - includes last packet */
#define RXRPC_SKB_TX_BUFFER	0x02	/* - is transmit buffer */
	union {
		int		remain;	/* amount of space remaining for next write */

		/* bitmap marking which subpackets have the ACK-requested flag */
		unsigned long	rx_req_ack[(RXRPC_MAX_NR_JUMBO + BITS_PER_LONG - 1) /
					   BITS_PER_LONG];
	};

	struct rxrpc_host_header hdr;	/* RxRPC packet header from this packet */
};

#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
205
206
207
208
/*
 * RxRPC security module interface.
 */
struct rxrpc_security {
	const char		*name;		/* name of this service */
	u8			security_index;	/* security type provided */

	/* Initialise a security service */
	int (*init)(void);

	/* Clean up a security service */
	void (*exit)(void);

	/* initialise a connection's security */
	int (*init_connection_security)(struct rxrpc_connection *);

	/* prime a connection's packet security */
	int (*prime_packet_security)(struct rxrpc_connection *);

	/* impose security on a packet */
	int (*secure_packet)(struct rxrpc_call *,
			     struct sk_buff *,
			     size_t,
			     void *);

	/* verify the security on a received packet */
	int (*verify_packet)(struct rxrpc_call *, struct sk_buff *,
			     unsigned int, unsigned int, rxrpc_seq_t, u16);

	/* Free crypto request on a call */
	void (*free_call_crypto)(struct rxrpc_call *);

	/* Locate the data in a received packet that's been verified */
	void (*locate_data)(struct rxrpc_call *, struct sk_buff *,
			    unsigned int *, unsigned int *);

	/* issue a challenge */
	int (*issue_challenge)(struct rxrpc_connection *);

	/* respond to a challenge */
	int (*respond_to_challenge)(struct rxrpc_connection *,
				    struct sk_buff *,
				    u32 *);

	/* verify a response */
	int (*verify_response)(struct rxrpc_connection *,
			       struct sk_buff *,
			       u32 *);

	/* clear connection security */
	void (*clear)(struct rxrpc_connection *);
};
258
259
260
261
262
263
/*
 * RxRPC local transport endpoint description: owned exclusively by the I/O
 * and process contexts; pointed to by transport socket and procfs.
 */
struct rxrpc_local {
	struct rcu_head		rcu;
	atomic_t		active_users;	/* users of the local endpoint */
	atomic_t		usage;		/* references to the structure */
	struct rxrpc_net	*rxnet;		/* the namespace we belong to */
	struct list_head	link;
	struct socket		*socket;	/* my UDP socket */
	struct work_struct	processor;
	struct rxrpc_sock __rcu	*service;	/* service(s) listening on this endpoint */
	struct rw_semaphore	defrag_sem;	/* control re-enablement of IP DF bit */
	struct sk_buff_head	reject_queue;	/* packets awaiting rejection */
	struct sk_buff_head	event_queue;	/* endpoint event packets awaiting processing */
	struct rb_root		client_conns;	/* client connections by socket params */
	spinlock_t		client_conns_lock;	/* lock for ->client_conns */
	spinlock_t		lock;		/* access lock */
	rwlock_t		services_lock;	/* lock for ->service */
	int			debug_id;	/* debug ID for printks */
	bool			dead;
	bool			service_closed;	/* service socket closed */
	struct sockaddr_rxrpc	srx;		/* local address */
};
285
286
287
288
289
/*
 * RxRPC remote transport endpoint definition: matched by local endpoint
 * and remote port, address and protocol type.
 */
struct rxrpc_peer {
	struct rcu_head		rcu;		/* this must be first */
	atomic_t		usage;
	unsigned long		hash_key;
	struct hlist_node	hash_link;
	struct rxrpc_local	*local;
	struct hlist_head	error_targets;	/* targets for net error distribution */
	struct rb_root		service_conns;	/* service connections to this peer */
	struct list_head	keepalive_link;	/* link in netns->peer_keepalive[] */
	time64_t		last_tx_at;	/* last time packet sent here */
	seqlock_t		service_conn_lock;
	spinlock_t		lock;		/* access lock */
	unsigned int		if_mtu;		/* interface MTU for this peer */
	unsigned int		mtu;		/* network MTU for this peer */
	unsigned int		maxdata;	/* data size excluding header sizes */
	unsigned short		hdrsize;	/* header size (IP + UDP + RxRPC) */
	int			debug_id;	/* debug ID for printks */
	struct sockaddr_rxrpc	srx;		/* remote address */

	/* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32
	spinlock_t		rtt_input_lock;	/* RTT lock for input routine */
	ktime_t			rtt_last_req;	/* time of last RTT request */
	u64			rtt;		/* current RTT estimate (in nS) */
	u64			rtt_sum;	/* sum of cache contents */
	u64			rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */
	u8			rtt_cursor;	/* next entry at which to insert */
	u8			rtt_usage;	/* amount of cache actually used */

	u8			cong_cwnd;	/* congestion window size */
};
321
322
323
324
/*
 * Keys for matching a connection: the epoch+cid pair doubles as a single
 * 64-bit lookup key.
 */
struct rxrpc_conn_proto {
	union {
		struct {
			u32	epoch;	/* epoch of this connection */
			u32	cid;	/* connection ID */
		};
		u64		index_key;
	};
};
334
/*
 * Parameters by which a client connection is matched or established.
 */
struct rxrpc_conn_parameters {
	struct rxrpc_local	*local;		/* representation of local endpoint */
	struct rxrpc_peer	*peer;		/* remote endpoint */
	struct key		*key;		/* security details */
	bool			exclusive;	/* T if conn is exclusive */
	bool			upgrade;	/* T if service ID can be upgraded */
	u16			service_id;	/* service ID for this connection */
	u32			security_level;	/* security level selected */
};
344
345
346
347
/*
 * Bits in the connection flags.
 */
enum rxrpc_conn_flag {
	RXRPC_CONN_HAS_IDR,		/* Has a client conn ID assigned */
	RXRPC_CONN_IN_SERVICE_CONNS,	/* Conn is in peer->service_conns */
	RXRPC_CONN_IN_CLIENT_CONNS,	/* Conn is in local->client_conns */
	RXRPC_CONN_EXPOSED,		/* Conn has extra ref for exposure */
	RXRPC_CONN_DONT_REUSE,		/* Don't reuse this connection */
	RXRPC_CONN_COUNTED,		/* Counted by rxrpc_nr_client_conns */
	RXRPC_CONN_PROBING_FOR_UPGRADE,	/* Probing for service upgrade */
	RXRPC_CONN_FINAL_ACK_0,		/* Need final ACK for channel 0 */
	RXRPC_CONN_FINAL_ACK_1,		/* Need final ACK for channel 1 */
	RXRPC_CONN_FINAL_ACK_2,		/* Need final ACK for channel 2 */
	RXRPC_CONN_FINAL_ACK_3,		/* Need final ACK for channel 3 */
};

#define RXRPC_CONN_FINAL_ACK_MASK ((1UL << RXRPC_CONN_FINAL_ACK_0) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_1) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_2) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_3))
366
367
368
369
/*
 * Events that can be raised upon a connection.
 */
enum rxrpc_conn_event {
	RXRPC_CONN_EV_CHALLENGE,	/* Send challenge packet */
};
373
374
375
376
/*
 * The connection cache state.
 */
enum rxrpc_conn_cache_state {
	RXRPC_CONN_CLIENT_INACTIVE,	/* Conn is not yet listed */
	RXRPC_CONN_CLIENT_WAITING,	/* Conn is on wait list, waiting for capacity */
	RXRPC_CONN_CLIENT_ACTIVE,	/* Conn is on active list, doing calls */
	RXRPC_CONN_CLIENT_UPGRADE,	/* Conn is on active list, probing for upgrade */
	RXRPC_CONN_CLIENT_CULLED,	/* Conn is culled and delisted, doing calls */
	RXRPC_CONN_CLIENT_IDLE,		/* Conn is on idle list, doing mostly nothing */
	RXRPC_CONN__NR_CACHE_STATES
};
386
387
388
389
/*
 * The connection protocol state.
 */
enum rxrpc_conn_proto_state {
	RXRPC_CONN_UNUSED,		/* Connection not yet attempted */
	RXRPC_CONN_CLIENT,		/* Client connection */
	RXRPC_CONN_SERVICE_PREALLOC,	/* Service connection preallocation */
	RXRPC_CONN_SERVICE_UNSECURED,	/* Service unsecured connection */
	RXRPC_CONN_SERVICE_CHALLENGING,	/* Service challenging for security */
	RXRPC_CONN_SERVICE,		/* Service secured connection */
	RXRPC_CONN_REMOTELY_ABORTED,	/* Conn aborted by peer */
	RXRPC_CONN_LOCALLY_ABORTED,	/* Conn aborted locally */
	RXRPC_CONN__NR_STATES
};
401
402
403
404
405
406
/*
 * RxRPC connection definition: a connection records details of a virtual
 * circuit carrying up to RXRPC_MAXCALLS simultaneous calls (one per
 * channel) between two endpoints.
 */
struct rxrpc_connection {
	struct rxrpc_conn_proto	proto;
	struct rxrpc_conn_parameters params;

	atomic_t		usage;
	struct rcu_head		rcu;
	struct list_head	cache_link;

	spinlock_t		channel_lock;
	unsigned char		active_chans;	/* mask of channels with active calls */
#define RXRPC_ACTIVE_CHANS_MASK	((1 << RXRPC_MAXCALLS) - 1)
	struct list_head	waiting_calls;	/* calls waiting for channels */
	struct rxrpc_channel {
		unsigned long	final_ack_at;	/* time at which to issue final ACK */
		struct rxrpc_call __rcu	*call;	/* active call */
		unsigned int	call_debug_id;	/* call's debug ID */
		u32		call_id;	/* ID of current call */
		u32		call_counter;	/* call ID counter */
		u32		last_call;	/* ID of last call */
		u8		last_type;	/* type of last packet */
		union {
			u32	last_seq;
			u32	last_abort;
		};
	} channels[RXRPC_MAXCALLS];

	struct timer_list	timer;		/* conn event timer */
	struct work_struct	processor;	/* connection event processor */
	union {
		struct rb_node	client_node;	/* node in local->client_conns */
		struct rb_node	service_node;	/* node in peer->service_conns */
	};
	struct list_head	proc_link;	/* link in procfs list */
	struct list_head	link;		/* link in master connection list */
	struct sk_buff_head	rx_queue;	/* received conn-level packets */
	const struct rxrpc_security *security;	/* applied security module */
	struct key		*server_key;	/* security for this service */
	struct crypto_sync_skcipher *cipher;	/* encryption handle */
	struct rxrpc_crypt	csum_iv;	/* packet checksum base */
	unsigned long		flags;
	unsigned long		events;
	unsigned long		idle_timestamp;	/* time at which last became idle */
	spinlock_t		state_lock;	/* state-change lock */
	enum rxrpc_conn_cache_state cache_state;
	enum rxrpc_conn_proto_state state;	/* current state of connection */
	u32			abort_code;	/* abort code of local/remote abort */
	int			debug_id;	/* debug ID for printks */
	atomic_t		serial;		/* packet serial number counter */
	unsigned int		hi_serial;	/* highest serial number received */
	u32			security_nonce;	/* response re-use preventer */
	u32			service_id;	/* service ID for this connection */
	u8			size_align;	/* data size alignment (for security) */
	u8			security_size;	/* security header size */
	u8			security_ix;	/* security type */
	u8			out_clientflag;	/* RXRPC_CLIENT_INITIATED if we are client */
	short			error;		/* local error incurred */
};
464
465static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
466{
467 return sp->hdr.flags & RXRPC_CLIENT_INITIATED;
468}
469
470static inline bool rxrpc_to_client(const struct rxrpc_skb_priv *sp)
471{
472 return !rxrpc_to_server(sp);
473}
474
475
476
477
/*
 * Flags in call->flags.
 */
enum rxrpc_call_flag {
	RXRPC_CALL_RELEASED,		/* call has been released - no more message to userspace */
	RXRPC_CALL_HAS_USERID,		/* has a user ID attached */
	RXRPC_CALL_IS_SERVICE,		/* Call is service call */
	RXRPC_CALL_EXPOSED,		/* The call was exposed to the world */
	RXRPC_CALL_RX_LAST,		/* Received the last packet (at rxtx_top) */
	RXRPC_CALL_TX_LAST,		/* Last packet in Tx buffer (at rxtx_top) */
	RXRPC_CALL_SEND_PING,		/* A ping will need to be sent */
	RXRPC_CALL_PINGING,		/* A ping in process */
	RXRPC_CALL_RETRANS_TIMEOUT,	/* Retransmission due to timeout occurred */
	RXRPC_CALL_BEGAN_RX_TIMER,	/* We began the expect_rx_by timer */
	RXRPC_CALL_RX_HEARD,		/* The peer responded at least once to this call */
	RXRPC_CALL_RX_UNDERRUN,		/* Got data underrun */
	RXRPC_CALL_IS_INTR,		/* The call is interruptible */
};
493
494
495
496
/*
 * Events that can be raised on a call.
 */
enum rxrpc_call_event {
	RXRPC_CALL_EV_ACK,		/* need to generate ACK */
	RXRPC_CALL_EV_ABORT,		/* need to generate abort */
	RXRPC_CALL_EV_RESEND,		/* Tx resend required */
	RXRPC_CALL_EV_PING,		/* Ping send required */
	RXRPC_CALL_EV_EXPIRED,		/* Expiry occurred */
	RXRPC_CALL_EV_ACK_LOST,		/* ACK may be lost, send ping */
};
505
506
507
508
/*
 * The states that a call can be in.
 */
enum rxrpc_call_state {
	RXRPC_CALL_UNINITIALISED,
	RXRPC_CALL_CLIENT_AWAIT_CONN,	/* - client waiting for connection to become available */
	RXRPC_CALL_CLIENT_SEND_REQUEST,	/* - client sending request phase */
	RXRPC_CALL_CLIENT_AWAIT_REPLY,	/* - client awaiting reply */
	RXRPC_CALL_CLIENT_RECV_REPLY,	/* - client receiving reply phase */
	RXRPC_CALL_SERVER_PREALLOC,	/* - service preallocation */
	RXRPC_CALL_SERVER_SECURING,	/* - server securing request connection */
	RXRPC_CALL_SERVER_ACCEPTING,	/* - server accepting request */
	RXRPC_CALL_SERVER_RECV_REQUEST,	/* - server receiving request */
	RXRPC_CALL_SERVER_ACK_REQUEST,	/* - server pending ACK of request */
	RXRPC_CALL_SERVER_SEND_REPLY,	/* - server sending reply */
	RXRPC_CALL_SERVER_AWAIT_ACK,	/* - server awaiting final ACK */
	RXRPC_CALL_COMPLETE,		/* - call complete */
	NR__RXRPC_CALL_STATES
};
525
526
527
528
/*
 * Call completion conditions (state == RXRPC_CALL_COMPLETE).
 */
enum rxrpc_call_completion {
	RXRPC_CALL_SUCCEEDED,		/* - Normal termination */
	RXRPC_CALL_REMOTELY_ABORTED,	/* - call aborted by peer */
	RXRPC_CALL_LOCALLY_ABORTED,	/* - call aborted locally on error or close */
	RXRPC_CALL_LOCAL_ERROR,		/* - call failed due to local error */
	RXRPC_CALL_NETWORK_ERROR,	/* - call terminated by network error */
	NR__RXRPC_CALL_COMPLETIONS
};
537
538
539
540
/*
 * Call Tx congestion management modes.
 */
enum rxrpc_congest_mode {
	RXRPC_CALL_SLOW_START,
	RXRPC_CALL_CONGEST_AVOIDANCE,
	RXRPC_CALL_PACKET_LOSS,
	RXRPC_CALL_FAST_RETRANSMIT,
	NR__RXRPC_CONGEST_MODES
};
548
549
550
551
552
/*
 * RxRPC call definition: an RxRPC call is a ground carried by a connection
 * channel.
 */
struct rxrpc_call {
	struct rcu_head		rcu;
	struct rxrpc_connection	*conn;		/* connection carrying call */
	struct rxrpc_peer	*peer;		/* Peer record for remote address */
	struct rxrpc_sock __rcu	*socket;	/* socket responsible */
	struct rxrpc_net	*rxnet;		/* Network namespace to which call belongs */
	const struct rxrpc_security *security;	/* applied security module */
	struct mutex		user_mutex;	/* User access mutex */
	unsigned long		ack_at;		/* When deferred ACK needs to happen */
	unsigned long		ack_lost_at;	/* When ACK is figured as lost */
	unsigned long		resend_at;	/* When next resend needs to happen */
	unsigned long		ping_at;	/* When next to send a ping */
	unsigned long		keepalive_at;	/* When next to send a keepalive ping */
	unsigned long		expect_rx_by;	/* When we expect to get a packet by */
	unsigned long		expect_req_by;	/* When we expect to get a request DATA packet by */
	unsigned long		expect_term_by;	/* When we expect call termination by */
	u32			next_rx_timo;	/* Timeout for next Rx packet (jif) */
	u32			next_req_timo;	/* Timeout for next Rx request packet (jif) */
	struct skcipher_request	*cipher_req;	/* Packet cipher request buffer */
	struct timer_list	timer;		/* Combined event timer */
	struct work_struct	processor;	/* Event processor */
	rxrpc_notify_rx_t	notify_rx;	/* kernel service Rx notification function */
	struct list_head	link;		/* link in master call list */
	struct list_head	chan_wait_link;	/* Link in conn->waiting_calls */
	struct hlist_node	error_link;	/* link in error distribution list */
	struct list_head	accept_link;	/* Link in rx->acceptq */
	struct list_head	recvmsg_link;	/* Link in rx->recvmsg_q */
	struct list_head	sock_link;	/* Link in rx->sock_calls */
	struct rb_node		sock_node;	/* Node in rx->calls */
	struct sk_buff		*tx_pending;	/* Tx socket buffer being filled */
	wait_queue_head_t	waitq;		/* Wait queue for channel or Tx */
	s64			tx_total_len;	/* Total length left to be transmitted (or -1) */
	__be32			crypto_buf[2];	/* Temporary packet crypto buffer */
	unsigned long		user_call_ID;	/* user-defined call ID */
	unsigned long		flags;
	unsigned long		events;
	spinlock_t		lock;
	spinlock_t		notify_lock;	/* Kernel notification lock */
	rwlock_t		state_lock;	/* lock for state transition */
	u32			abort_code;	/* Local/remote abort code */
	int			error;		/* Local error incurred */
	enum rxrpc_call_state	state;		/* current state of call */
	enum rxrpc_call_completion completion;	/* Call completion condition */
	atomic_t		usage;
	u16			service_id;	/* service ID */
	u8			security_ix;	/* Security type */
	u32			call_id;	/* call ID on connection */
	u32			cid;		/* connection ID plus channel index */
	int			debug_id;	/* debug ID for printks */
	unsigned short		rx_pkt_offset;	/* Current recvmsg packet offset */
	unsigned short		rx_pkt_len;	/* Current recvmsg packet len */
	bool			rx_pkt_last;	/* Current recvmsg packet is last */

	/* Rx/Tx circular buffer, depending on phase.
	 *
	 * In the Rx phase, packets are annotated with 0 or the number of the
	 * segment of a jumbo packet each buffer refers to.  There can be up to
	 * 47 segments in a maximum-size UDP packet.
	 *
	 * In the Tx phase, packets are annotated with which buffers have been
	 * acked.
	 */
#define RXRPC_RXTX_BUFF_SIZE	64
#define RXRPC_RXTX_BUFF_MASK	(RXRPC_RXTX_BUFF_SIZE - 1)
#define RXRPC_INIT_RX_WINDOW_SIZE 63
	struct sk_buff		**rxtx_buffer;
	u8			*rxtx_annotations;
#define RXRPC_TX_ANNO_ACK	0
#define RXRPC_TX_ANNO_UNACK	1
#define RXRPC_TX_ANNO_NAK	2
#define RXRPC_TX_ANNO_RETRANS	3
#define RXRPC_TX_ANNO_MASK	0x03
#define RXRPC_TX_ANNO_LAST	0x04
#define RXRPC_TX_ANNO_RESENT	0x08

#define RXRPC_RX_ANNO_SUBPACKET	0x3f	/* Subpacket number in jumbogram */
#define RXRPC_RX_ANNO_VERIFIED	0x80	/* Set if verified and decrypted */
	rxrpc_seq_t		tx_hard_ack;	/* Dead slot in buffer; the first transmitted but
						 * not hard-ACK'd packet follows this.
						 */
	rxrpc_seq_t		tx_top;		/* Highest Tx slot allocated. */
	u16			tx_backoff;	/* Delay to insert due to Tx failure */

	/* TCP-style slow-start congestion control [RFC5681].  Since the SMSS
	 * is fixed, we keep these numbers in terms of segments (ie. DATA
	 * packets) rather than bytes.
	 */
#define RXRPC_TX_SMSS		RXRPC_JUMBO_DATALEN
	u8			cong_cwnd;	/* Congestion window size */
	u8			cong_extra;	/* Extra to send for congestion management */
	u8			cong_ssthresh;	/* Slow-start threshold */
	enum rxrpc_congest_mode	cong_mode:8;	/* Congestion management mode */
	u8			cong_dup_acks;	/* Count of ACKs showing missing packets */
	u8			cong_cumul_acks; /* Cumulative ACK count */
	ktime_t			cong_tstamp;	/* Last time cwnd was changed */

	rxrpc_seq_t		rx_hard_ack;	/* Dead slot in buffer; the first received but not
						 * consumed packet follows this.
						 */
	rxrpc_seq_t		rx_top;		/* Highest Rx slot allocated. */
	rxrpc_seq_t		rx_expect_next;	/* Expected next packet sequence number */
	rxrpc_serial_t		rx_serial;	/* Highest serial received for this call */
	u8			rx_winsize;	/* Size of Rx window */
	u8			tx_winsize;	/* Maximum size of Tx window */
	bool			tx_phase;	/* T if transmission phase, F if receive phase */
	u8			nr_jumbo_bad;	/* Number of jumbo dups/exceeds-windows */

	spinlock_t		input_lock;	/* Lock for packet input to this call */

	/* receive-phase ACK management */
	u8			ackr_reason;	/* reason to ACK */
	rxrpc_serial_t		ackr_serial;	/* serial of packet being ACK'd */
	rxrpc_serial_t		ackr_first_seq;	/* first sequence number received */
	rxrpc_seq_t		ackr_prev_seq;	/* previous sequence number received */
	rxrpc_seq_t		ackr_consumed;	/* Highest packet shown consumed */
	rxrpc_seq_t		ackr_seen;	/* Highest packet shown seen */

	/* ping management */
	rxrpc_serial_t		ping_serial;	/* Last ping sent */
	ktime_t			ping_time;	/* Time last ping sent */

	/* transmission-phase ACK management */
	ktime_t			acks_latest_ts;	/* Timestamp of latest ACK received */
	rxrpc_serial_t		acks_latest;	/* serial number of latest ACK received */
	rxrpc_seq_t		acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
	rxrpc_seq_t		acks_lost_top;	/* tx_top at the time lost-ack ping sent */
	rxrpc_serial_t		acks_lost_ping;	/* Serial number of probe ACK */
};
681
682
683
684
/*
 * Summary of a new ACK and the changes it made to the Tx buffer packet states.
 */
struct rxrpc_ack_summary {
	u8			ack_reason;
	u8			nr_acks;		/* Number of ACKs in packet */
	u8			nr_nacks;		/* Number of NACKs in packet */
	u8			nr_new_acks;		/* Number of new ACKs in packet */
	u8			nr_new_nacks;		/* Number of new NACKs in packet */
	u8			nr_rot_new_acks;	/* Number of rotated new ACKs */
	bool			new_low_nack;		/* T if new low NACK found */
	bool			retrans_timeo;		/* T if reTx due to timeout happened */
	u8			flight_size;		/* Number of unreceived transmissions */
	/* Place to stash values for tracing */
	enum rxrpc_congest_mode	mode:8;
	u8			cwnd;
	u8			ssthresh;
	u8			dup_acks;
	u8			cumulative_acks;
};
702
703
704
705
/*
 * sendmsg() cmsg-specified call operation.
 */
enum rxrpc_command {
	RXRPC_CMD_SEND_DATA,		/* send data message */
	RXRPC_CMD_SEND_ABORT,		/* request abort generation */
	RXRPC_CMD_ACCEPT,		/* [server] accept incoming call */
	RXRPC_CMD_REJECT_BUSY,		/* [server] reject a call as busy */
};
712
/*
 * Parameters for creating a call, gathered from sendmsg cmsg data.
 */
struct rxrpc_call_params {
	s64			tx_total_len;	/* Total Tx data length (if send data) */
	unsigned long		user_call_ID;	/* User's call ID */
	struct {
		u32		hard;		/* Maximum lifetime (sec) */
		u32		idle;		/* Max time since last data packet (msec) */
		u32		normal;		/* Max time since last call packet (msec) */
	} timeouts;
	u8			nr_timeouts;	/* Number of timeouts specified */
	bool			intr;		/* The call is interruptible */
};
724
/*
 * Parameters decoded from a sendmsg control buffer.
 */
struct rxrpc_send_params {
	struct rxrpc_call_params call;
	u32			abort_code;	/* Abort code to use if aborting */
	enum rxrpc_command	command : 8;	/* The command to implement */
	bool			exclusive;	/* Shared or exclusive call */
	bool			upgrade;	/* If the connection is upgradeable */
};
732
733#include <trace/events/rxrpc.h>
734
735
736
737
/*
 * Global skb counters and the rxrpc work queue.
 */
extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
extern struct workqueue_struct *rxrpc_workqueue;

/*
 * Incoming-call acceptance and service preallocation.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
void rxrpc_discard_prealloc(struct rxrpc_sock *);
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
					   struct rxrpc_sock *,
					   struct sk_buff *);
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
				     rxrpc_notify_rx_t);
int rxrpc_reject_call(struct rxrpc_sock *);

/*
 * Call event processing.
 */
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool, bool,
		       enum rxrpc_propose_ack_trace);
void rxrpc_process_call(struct work_struct *);
760
/*
 * Bring the call's event timer forward to expire_at if that is sooner than
 * its current expiry, tracing the adjustment.
 */
static inline void rxrpc_reduce_call_timer(struct rxrpc_call *call,
					   unsigned long expire_at,
					   unsigned long now,
					   enum rxrpc_timer_trace why)
{
	trace_rxrpc_timer(call, why, now);
	timer_reduce(&call->timer, expire_at);
}
769
770
771
772
/*
 * Call object management.
 */
extern const char *const rxrpc_call_states[];
extern const char *const rxrpc_call_completions[];
extern unsigned int rxrpc_max_call_lifetime;
extern struct kmem_cache *rxrpc_call_jar;

struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t, unsigned int);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
					 struct rxrpc_conn_parameters *,
					 struct sockaddr_rxrpc *,
					 struct rxrpc_call_params *, gfp_t,
					 unsigned int);
void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
			 struct sk_buff *);
void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
bool __rxrpc_queue_call(struct rxrpc_call *);
bool rxrpc_queue_call(struct rxrpc_call *);
void rxrpc_see_call(struct rxrpc_call *);
void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_cleanup_call(struct rxrpc_call *);
void rxrpc_destroy_all_calls(struct rxrpc_net *);
796
797static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
798{
799 return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags);
800}
801
802static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
803{
804 return !rxrpc_is_service_call(call);
805}
806
807
808
809
810static inline bool __rxrpc_set_call_completion(struct rxrpc_call *call,
811 enum rxrpc_call_completion compl,
812 u32 abort_code,
813 int error)
814{
815 if (call->state < RXRPC_CALL_COMPLETE) {
816 call->abort_code = abort_code;
817 call->error = error;
818 call->completion = compl,
819 call->state = RXRPC_CALL_COMPLETE;
820 trace_rxrpc_call_complete(call);
821 wake_up(&call->waitq);
822 return true;
823 }
824 return false;
825}
826
827static inline bool rxrpc_set_call_completion(struct rxrpc_call *call,
828 enum rxrpc_call_completion compl,
829 u32 abort_code,
830 int error)
831{
832 bool ret;
833
834 write_lock_bh(&call->state_lock);
835 ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
836 write_unlock_bh(&call->state_lock);
837 return ret;
838}
839
840
841
842
/*
 * Record that a call successfully completed.  Caller must hold
 * call->state_lock.
 */
static inline bool __rxrpc_call_completed(struct rxrpc_call *call)
{
	return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
}
847
848static inline bool rxrpc_call_completed(struct rxrpc_call *call)
849{
850 bool ret;
851
852 write_lock_bh(&call->state_lock);
853 ret = __rxrpc_call_completed(call);
854 write_unlock_bh(&call->state_lock);
855 return ret;
856}
857
858
859
860
/*
 * Record that a call was locally aborted, tracing the abort before making
 * the completion transition.  Caller must hold call->state_lock.
 */
static inline bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
				      rxrpc_seq_t seq,
				      u32 abort_code, int error)
{
	trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq,
			  abort_code, error);
	return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
					   abort_code, error);
}
870
871static inline bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
872 rxrpc_seq_t seq, u32 abort_code, int error)
873{
874 bool ret;
875
876 write_lock_bh(&call->state_lock);
877 ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
878 write_unlock_bh(&call->state_lock);
879 return ret;
880}
881
882
883
884
/*
 * Abort a call due to a protocol error (-EPROTO), tracing the reason.
 */
static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
					struct sk_buff *skb,
					const char *eproto_why,
					const char *why,
					u32 abort_code)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	trace_rxrpc_rx_eproto(call, sp->hdr.serial, eproto_why);
	return rxrpc_abort_call(why, call, sp->hdr.seq, abort_code, -EPROTO);
}

/* Wrapper that interns the eproto reason string for tracing. */
#define rxrpc_abort_eproto(call, skb, eproto_why, abort_why, abort_code) \
	__rxrpc_abort_eproto((call), (skb), tracepoint_string(eproto_why), \
			     (abort_why), (abort_code))
900
901
902
903
/*
 * Client connection cache management.
 */
extern unsigned int rxrpc_max_client_connections;
extern unsigned int rxrpc_reap_client_connections;
extern unsigned long rxrpc_conn_idle_client_expiry;
extern unsigned long rxrpc_conn_idle_client_fast_expiry;
extern struct idr rxrpc_client_conn_ids;

void rxrpc_destroy_client_conn_ids(void);
int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *,
		       struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *,
		       gfp_t);
void rxrpc_expose_client_call(struct rxrpc_call *);
void rxrpc_disconnect_client_call(struct rxrpc_call *);
void rxrpc_put_client_conn(struct rxrpc_connection *);
void rxrpc_discard_expired_client_conns(struct work_struct *);
void rxrpc_destroy_all_client_connections(struct rxrpc_net *);
void rxrpc_clean_up_local_conns(struct rxrpc_local *);
920
921
922
923
/*
 * Connection event processing.
 */
void rxrpc_process_connection(struct work_struct *);

/*
 * Connection object management.
 */
extern unsigned int rxrpc_connection_expiry;
extern unsigned int rxrpc_closed_conn_expiry;

struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
						   struct sk_buff *,
						   struct rxrpc_peer **);
void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
void rxrpc_disconnect_call(struct rxrpc_call *);
void rxrpc_kill_connection(struct rxrpc_connection *);
bool rxrpc_queue_conn(struct rxrpc_connection *);
void rxrpc_see_connection(struct rxrpc_connection *);
void rxrpc_get_connection(struct rxrpc_connection *);
struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *);
void rxrpc_put_service_conn(struct rxrpc_connection *);
void rxrpc_service_connection_reaper(struct work_struct *);
void rxrpc_destroy_all_connections(struct rxrpc_net *);
946
947static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
948{
949 return conn->out_clientflag;
950}
951
952static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
953{
954 return !rxrpc_conn_is_client(conn);
955}
956
/*
 * Drop a reference on a connection, dispatching to the client or service
 * release path.  NULL is tolerated.
 */
static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	if (conn) {
		if (rxrpc_conn_is_client(conn))
			rxrpc_put_client_conn(conn);
		else
			rxrpc_put_service_conn(conn);
	}
}
967
/*
 * Bring the connection's event timer forward to expire_at if that is sooner
 * than its current expiry.
 */
static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn,
					   unsigned long expire_at)
{
	timer_reduce(&conn->timer, expire_at);
}
973
974
975
976
/*
 * Service connection management.
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
						     struct sk_buff *);
struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t);
void rxrpc_new_incoming_connection(struct rxrpc_sock *,
				   struct rxrpc_connection *, struct sk_buff *);
void rxrpc_unpublish_service_conn(struct rxrpc_connection *);

/*
 * Packet input.
 */
int rxrpc_input_packet(struct sock *, struct sk_buff *);

/*
 * Null security module.
 */
extern const struct rxrpc_security rxrpc_no_security;

/*
 * Key handling.
 */
extern struct key_type key_type_rxrpc;
extern struct key_type key_type_rxrpc_s;

int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time64_t,
			      u32);
1004
1005
1006
1007
/*
 * Local endpoint event handling.
 */
extern void rxrpc_process_local_events(struct rxrpc_local *);

/*
 * Local endpoint object management.
 */
struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *);
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
void rxrpc_put_local(struct rxrpc_local *);
struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *);
void rxrpc_unuse_local(struct rxrpc_local *);
void rxrpc_queue_local(struct rxrpc_local *);
void rxrpc_destroy_all_locals(struct rxrpc_net *);

/*
 * Miscellaneous tunables.
 */
extern unsigned int rxrpc_max_backlog __read_mostly;
extern unsigned long rxrpc_requested_ack_delay;
extern unsigned long rxrpc_soft_ack_delay;
extern unsigned long rxrpc_idle_ack_delay;
extern unsigned int rxrpc_rx_window_size;
extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;
extern unsigned long rxrpc_resend_timeout;

extern const s8 rxrpc_ack_priority[];

/*
 * Network namespace handling.
 */
extern unsigned int rxrpc_net_id;
extern struct pernet_operations rxrpc_net_ops;
1041
/*
 * Get the rxrpc per-namespace state for a network namespace.
 */
static inline struct rxrpc_net *rxrpc_net(struct net *net)
{
	return net_generic(net, rxrpc_net_id);
}
1046
1047
1048
1049
/*
 * Packet transmission.
 */
int rxrpc_send_ack_packet(struct rxrpc_call *, bool, rxrpc_serial_t *);
int rxrpc_send_abort_packet(struct rxrpc_call *);
int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
void rxrpc_reject_packets(struct rxrpc_local *);
void rxrpc_send_keepalive(struct rxrpc_peer *);

/*
 * Peer event handling.
 */
void rxrpc_error_report(struct sock *);
void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
			rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
void rxrpc_peer_keepalive_worker(struct work_struct *);

/*
 * Peer object management.
 */
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
					 const struct sockaddr_rxrpc *);
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *, struct rxrpc_local *,
				     struct sockaddr_rxrpc *, gfp_t);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
void rxrpc_new_incoming_peer(struct rxrpc_sock *, struct rxrpc_local *,
			     struct rxrpc_peer *);
void rxrpc_destroy_all_peers(struct rxrpc_net *);
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
void rxrpc_put_peer(struct rxrpc_peer *);
void rxrpc_put_peer_locked(struct rxrpc_peer *);

/*
 * /proc/net/rxrpc sequence file interfaces.
 */
extern const struct seq_operations rxrpc_call_seq_ops;
extern const struct seq_operations rxrpc_connection_seq_ops;
extern const struct seq_operations rxrpc_peer_seq_ops;

/*
 * Message reception.
 */
void rxrpc_notify_socket(struct rxrpc_call *);
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);
1092
1093
1094
1095
/*
 * Kerberos-based (rxkad) security module, if configured.
 */
#ifdef CONFIG_RXKAD
extern const struct rxrpc_security rxkad;
#endif

/*
 * Security module interface.
 */
int __init rxrpc_init_security(void);
void rxrpc_exit_security(void);
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
int rxrpc_init_server_conn_security(struct rxrpc_connection *);

/*
 * Message sending.
 */
int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);

/*
 * Socket buffer accounting.
 */
void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
void rxrpc_packet_destructor(struct sk_buff *);
void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_eaten_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_purge_queue(struct sk_buff_head *);

/*
 * Sysctl interface (no-op stubs when CONFIG_SYSCTL is off).
 */
#ifdef CONFIG_SYSCTL
extern int __init rxrpc_sysctl_init(void);
extern void rxrpc_sysctl_exit(void);
#else
static inline int __init rxrpc_sysctl_init(void) { return 0; }
static inline void rxrpc_sysctl_exit(void) {}
#endif

/*
 * Utility routines.
 */
int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);
1140
/*
 * 32-bit serial-number comparisons: cast the unsigned difference to signed
 * so that comparisons remain correct across sequence-number wrap
 * (cf. RFC 1982 serial number arithmetic).
 */
static inline bool before(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) < 0;
}
static inline bool before_eq(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) <= 0;
}
static inline bool after(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) > 0;
}
static inline bool after_eq(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
1157
1158
1159
1160
/*
 * Debug printing: unconditional printk primitives tagged with the current
 * task's comm name; the _enter/_leave/etc. wrappers below select which are
 * compiled in.
 */
extern unsigned int rxrpc_debug;

#define dbgprintk(FMT,...) \
	printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)

#define kenter(FMT,...)	dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define kleave(FMT,...)	dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define kdebug(FMT,...)	dbgprintk("    "FMT ,##__VA_ARGS__)
#define kproto(FMT,...)	dbgprintk("### "FMT ,##__VA_ARGS__)
#define knet(FMT,...)	dbgprintk("@@@ "FMT ,##__VA_ARGS__)
1171
1172
/*
 * Three debug tiers:
 * - __KDEBUG: all debug statements compiled in unconditionally.
 * - CONFIG_AF_RXRPC_DEBUG: statements gated at runtime by bits in
 *   rxrpc_debug.
 * - otherwise: no_printk() stubs that type-check args but emit nothing.
 */
#if defined(__KDEBUG)
#define _enter(FMT,...)	kenter(FMT,##__VA_ARGS__)
#define _leave(FMT,...)	kleave(FMT,##__VA_ARGS__)
#define _debug(FMT,...)	kdebug(FMT,##__VA_ARGS__)
#define _proto(FMT,...)	kproto(FMT,##__VA_ARGS__)
#define _net(FMT,...)	knet(FMT,##__VA_ARGS__)

#elif defined(CONFIG_AF_RXRPC_DEBUG)
#define RXRPC_DEBUG_KENTER	0x01
#define RXRPC_DEBUG_KLEAVE	0x02
#define RXRPC_DEBUG_KDEBUG	0x04
#define RXRPC_DEBUG_KPROTO	0x08
#define RXRPC_DEBUG_KNET	0x10

#define _enter(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER))	\
		kenter(FMT,##__VA_ARGS__);		\
} while (0)

#define _leave(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE))	\
		kleave(FMT,##__VA_ARGS__);		\
} while (0)

#define _debug(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG))	\
		kdebug(FMT,##__VA_ARGS__);		\
} while (0)

#define _proto(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO))	\
		kproto(FMT,##__VA_ARGS__);		\
} while (0)

#define _net(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET))	\
		knet(FMT,##__VA_ARGS__);		\
} while (0)

#else
#define _enter(FMT,...)	no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define _leave(FMT,...)	no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define _debug(FMT,...)	no_printk("    "FMT ,##__VA_ARGS__)
#define _proto(FMT,...)	no_printk("### "FMT ,##__VA_ARGS__)
#define _net(FMT,...)	no_printk("@@@ "FMT ,##__VA_ARGS__)
#endif
1224
1225
1226
1227
/*
 * Debug assertion macros: the "#if 1" variants BUG() on failure; switch to
 * "#if 0" to compile them all out as no-ops.
 */
#if 1

#define ASSERT(X)						\
do {								\
	if (unlikely(!(X))) {					\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely(!(_x OP _y))) {					\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)						\
do {								\
	if (unlikely((C) && !(X))) {				\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely((C) && !(_x OP _y))) {				\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)				\
do {						\
} while (0)

#define ASSERTCMP(X, OP, Y)			\
do {						\
} while (0)

#define ASSERTIF(C, X)				\
do {						\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)		\
do {						\
} while (0)

#endif
1289