1
2
3
4
5
6
7
8#include <linux/atomic.h>
9#include <linux/seqlock.h>
10#include <net/net_namespace.h>
11#include <net/netns/generic.h>
12#include <net/sock.h>
13#include <net/af_rxrpc.h>
14#include "protocol.h"
15
/*
 * Debug-only sanity check that an atomic_t inside a slab object does not
 * look like slab free-poison (i.e. the object hasn't already been freed).
 * Compiled out by default (#if 0): expands to a no-op.
 */
#if 0
#define CHECK_SLAB_OKAY(X) \
	BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
	       (POISON_FREE << 8 | POISON_FREE))
#else
#define CHECK_SLAB_OKAY(X) do {} while (0)
#endif
23
/* Block size of the fcrypt cipher used by rxrpc security. */
#define FCRYPT_BSIZE 8

/*
 * One fcrypt-sized block of key/checksum material, viewable either as raw
 * bytes or as two big-endian 32-bit words.  8-byte aligned for the benefit
 * of the crypto layer.
 */
struct rxrpc_crypt {
	union {
		u8 x[FCRYPT_BSIZE];
		__be32 n[2];
	};
} __attribute__((aligned(8)));

/* Queue (delayed) work items on the rxrpc-private workqueue. */
#define rxrpc_queue_work(WS) queue_work(rxrpc_workqueue, (WS))
#define rxrpc_queue_delayed_work(WS,D)	\
	queue_delayed_work(rxrpc_workqueue, (WS), (D))
35
36struct rxrpc_connection;
37
38
39
40
41
/*
 * Mark placed on skbs queued for rejection (see rxrpc_reject_packets()),
 * saying what kind of response should be generated.
 */
enum rxrpc_skb_mark {
	RXRPC_SKB_MARK_REJECT_BUSY,	/* Reject with a BUSY packet */
	RXRPC_SKB_MARK_REJECT_ABORT,	/* Reject with an ABORT packet */
};
46
47
48
49
/*
 * States an rxrpc socket moves through over its lifetime
 * (presumably kept in sk->sk_state - TODO confirm against af_rxrpc.c).
 */
enum {
	RXRPC_UNBOUND = 0,		/* Not yet bound to an address */
	RXRPC_CLIENT_UNBOUND,		/* Client use declared, not yet bound */
	RXRPC_CLIENT_BOUND,		/* Bound for client use */
	RXRPC_SERVER_BOUND,		/* Bound to one service */
	RXRPC_SERVER_BOUND2,		/* Bound to a second service as well */
	RXRPC_SERVER_LISTENING,		/* Listening for incoming calls */
	RXRPC_SERVER_LISTEN_DISABLED,	/* Listening switched off again */
	RXRPC_CLOSE,			/* Socket is being closed */
};
60
61
62
63
/*
 * Per-network-namespace rxrpc state, retrieved via rxrpc_net() from the
 * net_generic() area registered with rxrpc_net_ops/rxrpc_net_id.
 */
struct rxrpc_net {
	struct proc_dir_entry *proc_net;	/* procfs dir entry for this namespace */
	u32 epoch;				/* Local epoch value */
	struct list_head calls;			/* All calls in this namespace */
	rwlock_t call_lock;			/* Guards ->calls */
	atomic_t nr_calls;			/* Count of allocated calls */

	atomic_t nr_conns;			/* Count of allocated connections */
	struct list_head conn_proc_list;	/* Connections, for /proc listing */
	struct list_head service_conns;		/* Service conns, for expiry/reaping */
	rwlock_t conn_lock;			/* Guards the two lists above */
	struct work_struct service_conn_reaper;	/* Reaps expired service conns */
	struct timer_list service_conn_reap_timer; /* Schedules the service reaper */

	unsigned int nr_client_conns;		/* Number of client conns */
	unsigned int nr_active_client_conns;	/* ...of which currently active */
	bool kill_all_client_conns;		/* Discard all client conns (teardown) */
	bool live;				/* Namespace still up */
	spinlock_t client_conn_cache_lock;	/* Guards client-conn cache state */
	spinlock_t client_conn_discard_lock;	/* Serialises conn discarding */
	struct list_head waiting_client_conns;	/* Client conns in waiting state */
	struct list_head active_client_conns;	/* Client conns in active state */
	struct list_head idle_client_conns;	/* Client conns awaiting expiry */
	struct work_struct client_conn_reaper;	/* Reaps expired client conns */
	struct timer_list client_conn_reap_timer; /* Schedules the client reaper */

	struct list_head local_endpoints;	/* All local endpoints (rxrpc_local) */
	struct mutex local_mutex;		/* Guards ->local_endpoints */

	DECLARE_HASHTABLE (peer_hash, 10);	/* Peers, hashed by address */
	spinlock_t peer_hash_lock;		/* Guards ->peer_hash */

#define RXRPC_KEEPALIVE_TIME 20			/* Peer keepalive interval (seconds - TODO confirm) */
	u8 peer_keepalive_cursor;		/* Current bucket in ->peer_keepalive[] */
	time64_t peer_keepalive_base;		/* Time base for the keepalive buckets */
	struct list_head peer_keepalive[32];	/* Time-bucketed peer keepalive lists */
	struct list_head peer_keepalive_new;	/* Peers not yet placed in a bucket */
	struct timer_list peer_keepalive_timer;	/* Triggers the keepalive work item */
	struct work_struct peer_keepalive_work;	/* See rxrpc_peer_keepalive_worker() */
};
104
105
106
107
108
109
110
111
112
/*
 * Preallocation buffers for incoming service calls: rings of peers,
 * connections and calls indexed by free-running head/tail counters
 * (masked by RXRPC_BACKLOG_MAX - 1).
 */
struct rxrpc_backlog {
	unsigned short peer_backlog_head;	/* Ring insertion point for peers */
	unsigned short peer_backlog_tail;	/* Ring removal point for peers */
	unsigned short conn_backlog_head;	/* Ring insertion point for conns */
	unsigned short conn_backlog_tail;	/* Ring removal point for conns */
	unsigned short call_backlog_head;	/* Ring insertion point for calls */
	unsigned short call_backlog_tail;	/* Ring removal point for calls */
#define RXRPC_BACKLOG_MAX 32			/* Ring capacity (power of two) */
	struct rxrpc_peer *peer_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_connection *conn_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_call *call_backlog[RXRPC_BACKLOG_MAX];
};
125
126
127
128
/*
 * An rxrpc socket; wraps struct sock (which must stay the first member so
 * the rxrpc_sk() container_of conversion below is valid).
 */
struct rxrpc_sock {
	/* WARNING: sk has to be the first member */
	struct sock sk;
	rxrpc_notify_new_call_t notify_new_call; /* Callback: new call arrived */
	rxrpc_discard_new_call_t discard_new_call; /* Callback: new call discarded */
	struct rxrpc_local *local;		/* Local endpoint in use */
	struct rxrpc_backlog *backlog;		/* Preallocated incoming-call state */
	spinlock_t incoming_lock;		/* Guards incoming-call handling */
	struct list_head sock_calls;		/* All calls owned by this socket */
	struct list_head to_be_accepted;	/* Calls awaiting acceptance */
	struct list_head recvmsg_q;		/* Calls with something for recvmsg */
	rwlock_t recvmsg_lock;			/* Guards ->recvmsg_q */
	struct key *key;			/* Security key for client use */
	struct key *securities;			/* Server keyring */
	struct rb_root calls;			/* Calls indexed by user_call_ID */
	unsigned long flags;
#define RXRPC_SOCK_CONNECTED 0			/* connect_srx is set */
	rwlock_t call_lock;			/* Guards ->calls */
	u32 min_sec_level;			/* Minimum acceptable security level */
#define RXRPC_SECURITY_MAX RXRPC_SECURITY_ENCRYPT
	bool exclusive;				/* Exclusive connection per call */
	u16 second_service;			/* Optional second service id */
	struct {
		/* Service upgrade: from -> to service id mapping */
		u16 from;
		u16 to;
	} service_upgrade;
	sa_family_t family;			/* Address family in use */
	struct sockaddr_rxrpc srx;		/* Local address */
	struct sockaddr_rxrpc connect_srx;	/* Default connect target */
};

/* Convert a struct sock pointer back into its enclosing rxrpc_sock. */
#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)
162
163
164
165
/*
 * RxRPC packet header in host byte order (the on-the-wire form lives in
 * protocol.h).  Packed: layout must match where it is copied around.
 */
struct rxrpc_host_header {
	u32 epoch;		/* Client boot/epoch value */
	u32 cid;		/* Connection and channel ID */
	u32 callNumber;		/* Call ID (0 for connection-level packet) */
	u32 seq;		/* Sequence number of this packet */
	u32 serial;		/* Serial number of this packet */
	u8 type;		/* Packet type */
	u8 flags;		/* Packet flags */
	u8 userStatus;		/* App-layer status */
	u8 securityIndex;	/* Security protocol in use */
	union {
		u16 _rsvd;	/* Reserved */
		u16 cksum;	/* Security checksum */
	};
	u16 serviceId;		/* Service ID */
} __packed;
182
183
184
185
186
/*
 * rxrpc private data kept in an sk_buff's control block (skb->cb) -
 * see the rxrpc_skb() accessor below.  Must fit in sizeof(skb->cb).
 */
struct rxrpc_skb_priv {
	atomic_t nr_ring_pins;		/* Number of rxtx-ring pins on this skb */
	u8 nr_subpackets;		/* Number of subpackets (jumbo) */
	u8 rx_flags;			/* Received-packet flags */
#define RXRPC_SKB_INCL_LAST 0x01	/* Includes the last subpacket */
#define RXRPC_SKB_TX_BUFFER 0x02	/* This is a transmission buffer */
	union {
		int remain;		/* Tx: bytes of space remaining */
		/* Rx: one ack-requested bit per subpacket */
		unsigned long rx_req_ack[(RXRPC_MAX_NR_JUMBO + BITS_PER_LONG - 1) /
					 BITS_PER_LONG];
	};

	struct rxrpc_host_header hdr;	/* Decoded packet header */
};

/* Access the rxrpc private data in an skb's control block. */
#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
205
206
207
208
/*
 * Operations table for an rxrpc security class (e.g. rxkad or the
 * null class rxrpc_no_security).
 */
struct rxrpc_security {
	const char *name;		/* Name of this security class */
	u8 security_index;		/* Index into the security table */

	/* Initialise the security class (module load time) */
	int (*init)(void);

	/* Clean up the security class (module unload time) */
	void (*exit)(void);

	/* Initialise security on a connection */
	int (*init_connection_security)(struct rxrpc_connection *);

	/* Prime packet security on a connection */
	int (*prime_packet_security)(struct rxrpc_connection *);

	/* Apply security to an outgoing packet */
	int (*secure_packet)(struct rxrpc_call *,
			     struct sk_buff *,
			     size_t,
			     void *);

	/* Verify security on a received packet */
	int (*verify_packet)(struct rxrpc_call *, struct sk_buff *,
			     unsigned int, unsigned int, rxrpc_seq_t, u16);

	/* Locate the data payload within a received packet */
	void (*locate_data)(struct rxrpc_call *, struct sk_buff *,
			    unsigned int *, unsigned int *);

	/* Issue a CHALLENGE on a connection */
	int (*issue_challenge)(struct rxrpc_connection *);

	/* Respond to a received CHALLENGE */
	int (*respond_to_challenge)(struct rxrpc_connection *,
				    struct sk_buff *,
				    u32 *);

	/* Verify a received RESPONSE */
	int (*verify_response)(struct rxrpc_connection *,
			       struct sk_buff *,
			       u32 *);

	/* Clear security state from a connection */
	void (*clear)(struct rxrpc_connection *);
};
255
256
257
258
259
260
/*
 * An rxrpc local endpoint: the UDP transport socket plus per-endpoint
 * state.  Reference-counted via ->usage with a separate ->active_users
 * count for users that keep the endpoint operational.
 */
struct rxrpc_local {
	struct rcu_head rcu;			/* For deferred freeing */
	atomic_t active_users;			/* Users keeping the endpoint alive */
	atomic_t usage;				/* Reference count */
	struct rxrpc_net *rxnet;		/* Owning network namespace state */
	struct list_head link;			/* Link in rxnet->local_endpoints */
	struct socket *socket;			/* Underlying transport socket */
	struct work_struct processor;		/* Event-processing work item */
	struct rxrpc_sock __rcu *service;	/* Service socket bound here, if any */
	struct rw_semaphore defrag_sem;		/* Serialises MTU-probing/defrag - TODO confirm */
	struct sk_buff_head reject_queue;	/* Packets awaiting rejection */
	struct sk_buff_head event_queue;	/* Endpoint-level event packets */
	struct rb_root client_conns;		/* Client conns, indexed for reuse */
	spinlock_t client_conns_lock;		/* Guards ->client_conns */
	spinlock_t lock;			/* General endpoint lock */
	rwlock_t services_lock;			/* Guards ->service */
	int debug_id;				/* ID for tracing/debugging */
	bool dead;				/* Endpoint being torn down */
	bool service_closed;			/* Service socket was closed */
	struct sockaddr_rxrpc srx;		/* Local address bound to */
};
282
283
284
285
286
/*
 * A remote rxrpc endpoint (host/port), shared by all calls/connections
 * to that address through a given local endpoint.
 */
struct rxrpc_peer {
	struct rcu_head rcu;			/* For deferred freeing */
	atomic_t usage;				/* Reference count */
	unsigned long hash_key;			/* Key into rxnet->peer_hash */
	struct hlist_node hash_link;		/* Link in rxnet->peer_hash */
	struct rxrpc_local *local;		/* Local endpoint used to reach peer */
	struct hlist_head error_targets;	/* Calls interested in error reports */
	struct rb_root service_conns;		/* Service conns to this peer */
	struct list_head keepalive_link;	/* Link in a keepalive bucket */
	time64_t last_tx_at;			/* Time of last transmission */
	seqlock_t service_conn_lock;		/* Guards ->service_conns */
	spinlock_t lock;			/* General peer lock */
	unsigned int if_mtu;			/* Interface MTU */
	unsigned int mtu;			/* Effective network MTU */
	unsigned int maxdata;			/* Max usable data per packet */
	unsigned short hdrsize;			/* Header + security overhead */
	int debug_id;				/* ID for tracing/debugging */
	struct sockaddr_rxrpc srx;		/* Remote address */

	/* RTT tracking: a small ring of recent samples plus running sum */
#define RXRPC_RTT_CACHE_SIZE 32
	spinlock_t rtt_input_lock;		/* Guards RTT sample insertion */
	ktime_t rtt_last_req;			/* Time of last RTT-probing request */
	u64 rtt;				/* Current RTT estimate */
	u64 rtt_sum;				/* Sum of the cached samples */
	u64 rtt_cache[RXRPC_RTT_CACHE_SIZE];	/* Ring of recent RTT samples */
	u8 rtt_cursor;				/* Next slot in ->rtt_cache */
	u8 rtt_usage;				/* Number of valid cached samples */

	u8 cong_cwnd;				/* Congestion window to start calls with */
};
318
319
320
321
/*
 * Protocol-level identity of a connection: epoch + connection ID, also
 * viewable as a single 64-bit lookup key.
 */
struct rxrpc_conn_proto {
	union {
		struct {
			u32 epoch;	/* Epoch of the connection */
			u32 cid;	/* Connection ID */
		};
		u64 index_key;		/* Combined key for tree/hash lookup */
	};
};

/*
 * Parameters used when establishing or matching a connection.
 */
struct rxrpc_conn_parameters {
	struct rxrpc_local *local;	/* Local endpoint */
	struct rxrpc_peer *peer;	/* Remote endpoint */
	struct key *key;		/* Security key to use */
	bool exclusive;			/* Connection is exclusive to one call */
	bool upgrade;			/* Request a service upgrade */
	u16 service_id;			/* Service ID to contact */
	u32 security_level;		/* Requested security level */
};
341
342
343
344
/*
 * Bit numbers for rxrpc_connection::flags.
 */
enum rxrpc_conn_flag {
	RXRPC_CONN_HAS_IDR,		/* Has an ID allocated in the conn IDR */
	RXRPC_CONN_IN_SERVICE_CONNS,	/* On a peer's service_conns tree */
	RXRPC_CONN_IN_CLIENT_CONNS,	/* On a local's client_conns tree */
	RXRPC_CONN_EXPOSED,		/* ID exposed to the world */
	RXRPC_CONN_DONT_REUSE,		/* Don't reuse for further calls */
	RXRPC_CONN_COUNTED,		/* Counted in rxnet->nr_client_conns */
	RXRPC_CONN_PROBING_FOR_UPGRADE,	/* Probing for a service upgrade */
	RXRPC_CONN_FINAL_ACK_0,		/* Final ACK pending on channel 0 */
	RXRPC_CONN_FINAL_ACK_1,		/* Final ACK pending on channel 1 */
	RXRPC_CONN_FINAL_ACK_2,		/* Final ACK pending on channel 2 */
	RXRPC_CONN_FINAL_ACK_3,		/* Final ACK pending on channel 3 */
};

/* Mask of all four per-channel final-ACK-pending flag bits. */
#define RXRPC_CONN_FINAL_ACK_MASK ((1UL << RXRPC_CONN_FINAL_ACK_0) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_1) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_2) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_3))
363
364
365
366
/*
 * Bit numbers for rxrpc_connection::events.
 */
enum rxrpc_conn_event {
	RXRPC_CONN_EV_CHALLENGE,	/* Send a security CHALLENGE packet */
};
370
371
372
373
/*
 * Client-connection cache states (rxrpc_connection::cache_state),
 * driving the waiting/active/idle lists in struct rxrpc_net.
 */
enum rxrpc_conn_cache_state {
	RXRPC_CONN_CLIENT_INACTIVE,	/* Not on any list, no waiters */
	RXRPC_CONN_CLIENT_WAITING,	/* Waiting for an activation slot */
	RXRPC_CONN_CLIENT_ACTIVE,	/* Active, being used for calls */
	RXRPC_CONN_CLIENT_UPGRADE,	/* Active, probing for upgrade */
	RXRPC_CONN_CLIENT_CULLED,	/* Culled from the active list */
	RXRPC_CONN_CLIENT_IDLE,		/* Idle, awaiting reuse or expiry */
	RXRPC_CONN__NR_CACHE_STATES
};
383
384
385
386
/*
 * Protocol states of a connection (rxrpc_connection::state).
 */
enum rxrpc_conn_proto_state {
	RXRPC_CONN_UNUSED,		/* Not yet attempted */
	RXRPC_CONN_CLIENT,		/* Client connection */
	RXRPC_CONN_SERVICE_PREALLOC,	/* Preallocated service connection */
	RXRPC_CONN_SERVICE_UNSECURED,	/* Service, security not yet negotiated */
	RXRPC_CONN_SERVICE_CHALLENGING,	/* Service, security challenge in flight */
	RXRPC_CONN_SERVICE,		/* Service, fully secured */
	RXRPC_CONN_REMOTELY_ABORTED,	/* Aborted by the peer */
	RXRPC_CONN_LOCALLY_ABORTED,	/* Aborted locally */
	RXRPC_CONN__NR_STATES
};
398
399
400
401
402
403
/*
 * An rxrpc connection: a set of RXRPC_MAXCALLS call channels between a
 * local and a remote endpoint, sharing security state.
 */
struct rxrpc_connection {
	struct rxrpc_conn_proto proto;		/* Epoch + CID identity */
	struct rxrpc_conn_parameters params;	/* Immutable connection parameters */

	atomic_t usage;				/* Reference count */
	struct rcu_head rcu;			/* For deferred freeing */
	struct list_head cache_link;		/* Link in a client-conn cache list */

	spinlock_t channel_lock;		/* Guards the channel array */
	unsigned char active_chans;		/* Bitmask of channels in use */
#define RXRPC_ACTIVE_CHANS_MASK	((1 << RXRPC_MAXCALLS) - 1)
	struct list_head waiting_calls;		/* Calls waiting for channels */
	struct rxrpc_channel {
		unsigned long final_ack_at;	/* When the final ACK is due (jiffies) */
		struct rxrpc_call __rcu *call;	/* Active call on this channel */
		unsigned int call_debug_id;	/* Debug ID of that call */
		u32 call_id;			/* ID of the current call */
		u32 call_counter;		/* Call ID counter for this channel */
		u32 last_call;			/* ID of the last completed call */
		u8 last_type;			/* Type of last packet sent for it */
		union {
			u32 last_seq;		/* Last seq acked */
			u32 last_abort;		/* Last abort code sent */
		};
	} channels[RXRPC_MAXCALLS];

	struct timer_list timer;		/* Conn event timer */
	struct work_struct processor;		/* Conn event processor */
	union {
		struct rb_node client_node;	/* Node in local->client_conns */
		struct rb_node service_node;	/* Node in peer->service_conns */
	};
	struct list_head proc_link;		/* Link in rxnet->conn_proc_list */
	struct list_head link;			/* Link in rxnet conn list */
	struct sk_buff_head rx_queue;		/* Received conn-level packets */
	const struct rxrpc_security *security;	/* Security ops in effect */
	struct key *server_key;			/* Server's security key */
	struct crypto_sync_skcipher *cipher;	/* Checksum/encryption cipher */
	struct rxrpc_crypt csum_iv;		/* Packet checksum base IV */
	unsigned long flags;			/* RXRPC_CONN_* flag bits */
	unsigned long events;			/* RXRPC_CONN_EV_* event bits */
	unsigned long idle_timestamp;		/* Time at which conn became idle */
	spinlock_t state_lock;			/* Guards state changes */
	enum rxrpc_conn_cache_state cache_state; /* Client cache state */
	enum rxrpc_conn_proto_state state;	/* Protocol state */
	u32 abort_code;				/* Abort code for the whole conn */
	int debug_id;				/* ID for tracing/debugging */
	atomic_t serial;			/* Outgoing packet serial counter */
	unsigned int hi_serial;			/* Highest serial number received */
	u32 security_nonce;			/* Security connection nonce */
	u32 service_id;				/* Service ID (may be upgraded) */
	u8 size_align;				/* Data size alignment */
	u8 security_size;			/* Security header size */
	u8 security_ix;				/* Security type in use */
	u8 out_clientflag;			/* RXRPC_CLIENT_INITIATED if client conn, else 0 */
	short error;				/* Local error code */
};
461
462static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
463{
464 return sp->hdr.flags & RXRPC_CLIENT_INITIATED;
465}
466
467static inline bool rxrpc_to_client(const struct rxrpc_skb_priv *sp)
468{
469 return !rxrpc_to_server(sp);
470}
471
472
473
474
/*
 * Bit numbers for rxrpc_call::flags.
 */
enum rxrpc_call_flag {
	RXRPC_CALL_RELEASED,		/* Released from its socket */
	RXRPC_CALL_HAS_USERID,		/* Has a user ID in the socket's tree */
	RXRPC_CALL_IS_SERVICE,		/* Service-side call (else client) */
	RXRPC_CALL_EXPOSED,		/* Call ID exposed to the world */
	RXRPC_CALL_RX_LAST,		/* Last received packet seen */
	RXRPC_CALL_TX_LAST,		/* Last transmitted packet queued */
	RXRPC_CALL_SEND_PING,		/* Send a PING ACK */
	RXRPC_CALL_PINGING,		/* PING in flight */
	RXRPC_CALL_RETRANS_TIMEOUT,	/* Retransmission caused by timeout */
	RXRPC_CALL_BEGAN_RX_TIMER,	/* Rx-expiry timer started */
	RXRPC_CALL_RX_HEARD,		/* Something received on this call */
	RXRPC_CALL_RX_UNDERRUN,		/* Rx ran short of data */
	RXRPC_CALL_IS_INTR,		/* Call is interruptible */
};
490
491
492
493
/*
 * Bit numbers for rxrpc_call::events.
 */
enum rxrpc_call_event {
	RXRPC_CALL_EV_ACK,		/* Need to generate an ACK */
	RXRPC_CALL_EV_ABORT,		/* Need to generate an abort */
	RXRPC_CALL_EV_RESEND,		/* Tx resend required */
	RXRPC_CALL_EV_PING,		/* Ping required */
	RXRPC_CALL_EV_EXPIRED,		/* Call expired */
	RXRPC_CALL_EV_ACK_LOST,		/* An ACK may have been lost */
};
502
503
504
505
/*
 * States a call passes through (rxrpc_call::state; names also exported
 * via rxrpc_call_states[]).
 */
enum rxrpc_call_state {
	RXRPC_CALL_UNINITIALISED,	/* Not yet set up */
	RXRPC_CALL_CLIENT_AWAIT_CONN,	/* Client: waiting for a connection */
	RXRPC_CALL_CLIENT_SEND_REQUEST,	/* Client: sending the request */
	RXRPC_CALL_CLIENT_AWAIT_REPLY,	/* Client: request sent, awaiting reply */
	RXRPC_CALL_CLIENT_RECV_REPLY,	/* Client: receiving the reply */
	RXRPC_CALL_SERVER_PREALLOC,	/* Server: preallocated, unused */
	RXRPC_CALL_SERVER_SECURING,	/* Server: securing the connection */
	RXRPC_CALL_SERVER_ACCEPTING,	/* Server: awaiting acceptance */
	RXRPC_CALL_SERVER_RECV_REQUEST,	/* Server: receiving the request */
	RXRPC_CALL_SERVER_ACK_REQUEST,	/* Server: acknowledging the request */
	RXRPC_CALL_SERVER_SEND_REPLY,	/* Server: sending the reply */
	RXRPC_CALL_SERVER_AWAIT_ACK,	/* Server: awaiting final ACK */
	RXRPC_CALL_COMPLETE,		/* Call complete (see completion below) */
	NR__RXRPC_CALL_STATES
};
522
523
524
525
/*
 * How a call reached RXRPC_CALL_COMPLETE (rxrpc_call::completion).
 */
enum rxrpc_call_completion {
	RXRPC_CALL_SUCCEEDED,		/* Completed successfully */
	RXRPC_CALL_REMOTELY_ABORTED,	/* Aborted by the peer */
	RXRPC_CALL_LOCALLY_ABORTED,	/* Aborted locally */
	RXRPC_CALL_LOCAL_ERROR,		/* Failed with a local error */
	RXRPC_CALL_NETWORK_ERROR,	/* Failed with a network error */
	NR__RXRPC_CALL_COMPLETIONS
};
534
535
536
537
/*
 * Congestion-management modes for a call (rxrpc_call::cong_mode).
 */
enum rxrpc_congest_mode {
	RXRPC_CALL_SLOW_START,		/* Slow-start phase */
	RXRPC_CALL_CONGEST_AVOIDANCE,	/* Congestion-avoidance phase */
	RXRPC_CALL_PACKET_LOSS,		/* Packet loss detected */
	RXRPC_CALL_FAST_RETRANSMIT,	/* Fast-retransmit phase */
	NR__RXRPC_CONGEST_MODES
};
545
546
547
548
549
/*
 * An RxRPC call: one request/reply exchange on a connection channel.
 */
struct rxrpc_call {
	struct rcu_head rcu;		/* For deferred freeing */
	struct rxrpc_connection *conn;	/* Connection carrying this call */
	struct rxrpc_peer *peer;	/* The remote endpoint */
	struct rxrpc_sock __rcu *socket; /* Owning socket */
	struct rxrpc_net *rxnet;	/* Owning network namespace state */
	struct mutex user_mutex;	/* Serialises user access to the call */
	unsigned long ack_at;		/* When a deferred ACK is due (jiffies) */
	unsigned long ack_lost_at;	/* When an ACK is considered lost */
	unsigned long resend_at;	/* When a resend is next due */
	unsigned long ping_at;		/* When a keepalive ping is next due */
	unsigned long keepalive_at;	/* When a keepalive packet is next due */
	unsigned long expect_rx_by;	/* When we expect to next receive */
	unsigned long expect_req_by;	/* When the next request packet is expected */
	unsigned long expect_term_by;	/* Hard call-lifetime deadline */
	u32 next_rx_timo;		/* Rx-inactivity timeout */
	u32 next_req_timo;		/* Idle-request timeout */
	struct timer_list timer;	/* Combined call timer */
	struct work_struct processor;	/* Call event processor */
	rxrpc_notify_rx_t notify_rx;	/* Kernel-API receive notification hook */
	struct list_head link;		/* Link in rxnet->calls */
	struct list_head chan_wait_link; /* Link in conn->waiting_calls */
	struct hlist_node error_link;	/* Link in peer->error_targets */
	struct list_head accept_link;	/* Link in rx->to_be_accepted */
	struct list_head recvmsg_link;	/* Link in rx->recvmsg_q */
	struct list_head sock_link;	/* Link in rx->sock_calls */
	struct rb_node sock_node;	/* Node in rx->calls */
	struct sk_buff *tx_pending;	/* Tx buffer being filled */
	wait_queue_head_t waitq;	/* Waiters (e.g. for completion) */
	s64 tx_total_len;		/* Declared total Tx length (-1 if unset - TODO confirm) */
	__be32 crypto_buf[2];		/* Scratch space for packet crypto */
	unsigned long user_call_ID;	/* User's ID for this call */
	unsigned long flags;		/* RXRPC_CALL_* flag bits */
	unsigned long events;		/* RXRPC_CALL_EV_* event bits */
	spinlock_t lock;		/* General call lock */
	spinlock_t notify_lock;		/* Guards notification calls */
	rwlock_t state_lock;		/* Guards ->state et al. */
	u32 abort_code;			/* Local/remote abort code */
	int error;			/* Local error code */
	enum rxrpc_call_state state;	/* Current call state */
	enum rxrpc_call_completion completion; /* How the call completed */
	atomic_t usage;			/* Reference count */
	u16 service_id;			/* Service ID */
	u8 security_ix;			/* Security type in use */
	u32 call_id;			/* Call ID on the connection */
	u32 cid;			/* Connection ID + channel */
	int debug_id;			/* ID for tracing/debugging */
	unsigned short rx_pkt_offset;	/* Receive-path packet data offset */
	unsigned short rx_pkt_len;	/* Receive-path packet data length */

	/*
	 * Combined Rx/Tx ring buffer of skbs plus one annotation byte per
	 * slot, indexed by sequence number masked with RXRPC_RXTX_BUFF_MASK.
	 */
#define RXRPC_RXTX_BUFF_SIZE	64
#define RXRPC_RXTX_BUFF_MASK	(RXRPC_RXTX_BUFF_SIZE - 1)
#define RXRPC_INIT_RX_WINDOW_SIZE 63
	struct sk_buff **rxtx_buffer;	/* Ring of Rx/Tx packets */
	u8 *rxtx_annotations;		/* Per-slot annotations (flags below) */
#define RXRPC_TX_ANNO_ACK	0	/* Tx slot: acked */
#define RXRPC_TX_ANNO_UNACK	1	/* Tx slot: not yet acked */
#define RXRPC_TX_ANNO_NAK	2	/* Tx slot: negatively acked */
#define RXRPC_TX_ANNO_RETRANS	3	/* Tx slot: queued for retransmission */
#define RXRPC_TX_ANNO_MASK	0x03	/* Mask of Tx ack-state bits */
#define RXRPC_TX_ANNO_LAST	0x04	/* Tx slot: the last packet */
#define RXRPC_TX_ANNO_RESENT	0x08	/* Tx slot: has been resent */

#define RXRPC_RX_ANNO_SUBPACKET	0x3f	/* Rx slot: subpacket number */
#define RXRPC_RX_ANNO_VERIFIED	0x80	/* Rx slot: security-verified */
	rxrpc_seq_t tx_hard_ack;	/* Highest Tx seq hard-acked (consumed) */
	rxrpc_seq_t tx_top;		/* Highest Tx seq prepared */
	u16 tx_backoff;			/* Tx delay backoff */

	/*
	 * Congestion control state; TX_SMSS is the sender max segment size
	 * used for window calculations.
	 */
#define RXRPC_TX_SMSS		RXRPC_JUMBO_DATALEN
	u8 cong_cwnd;			/* Congestion window size */
	u8 cong_extra;			/* Extra to send for congestion management */
	u8 cong_ssthresh;		/* Slow-start threshold */
	enum rxrpc_congest_mode cong_mode:8; /* Current congestion mode */
	u8 cong_dup_acks;		/* Count of duplicate ACKs seen */
	u8 cong_cumul_acks;		/* Cumulative ACK count */
	ktime_t cong_tstamp;		/* Last time cwnd was changed */

	rxrpc_seq_t rx_hard_ack;	/* Highest Rx seq consumed/hard-acked */
	rxrpc_seq_t rx_top;		/* Highest Rx seq seen */
	rxrpc_seq_t rx_expect_next;	/* Next expected Rx seq */
	rxrpc_serial_t rx_serial;	/* Highest serial received on this call */
	u8 rx_winsize;			/* Rx window size */
	u8 tx_winsize;			/* Peer's advertised Tx window size */
	bool tx_phase;			/* Currently in the transmit phase */
	u8 nr_jumbo_bad;		/* Number of bad jumbo packets seen */

	spinlock_t input_lock;		/* Serialises packet input */

	/* Receive-phase ACK management */
	u8 ackr_reason;			/* Reason for the pending ACK */
	rxrpc_serial_t ackr_serial;	/* Serial to be ACK'd */
	rxrpc_serial_t ackr_first_seq;	/* First seq in the last-seen ACK range */
	rxrpc_seq_t ackr_prev_seq;	/* Previous seq from the last ACK */
	rxrpc_seq_t ackr_consumed;	/* Highest seq whose consumption was ACK'd */
	rxrpc_seq_t ackr_seen;		/* Highest seq whose arrival was ACK'd */

	/* RTT-probing ping state */
	rxrpc_serial_t ping_serial;	/* Serial of the last ping sent */
	ktime_t ping_time;		/* Time the last ping was sent */

	/* Soft-ACK bookkeeping for loss detection */
	ktime_t acks_latest_ts;		/* Timestamp of the latest ACK received */
	rxrpc_serial_t acks_latest;	/* Serial of the latest ACK received */
	rxrpc_seq_t acks_lowest_nak;	/* Lowest NAK'd seq in the latest ACK */
	rxrpc_seq_t acks_lost_top;	/* tx_top when the lost-ACK ping was sent */
	rxrpc_serial_t acks_lost_ping;	/* Serial of the lost-ACK probe ping */
};
675
676
677
678
/*
 * Summary of a received ACK plus the changes it caused to the
 * congestion state.
 */
struct rxrpc_ack_summary {
	u8 ack_reason;			/* Reason carried in the ACK */
	u8 nr_acks;			/* Number of soft ACKs */
	u8 nr_nacks;			/* Number of soft NAKs */
	u8 nr_new_acks;			/* Number of newly-ACK'd packets */
	u8 nr_new_nacks;		/* Number of newly-NAK'd packets */
	u8 nr_rot_new_acks;		/* Number of newly rotated-out ACKs */
	bool new_low_nack;		/* A new low-seq NAK appeared */
	bool retrans_timeo;		/* Retransmission due to timeout */
	u8 flight_size;			/* Number of unreceived transmissions */

	/* Resulting congestion state */
	enum rxrpc_congest_mode mode:8;	/* Congestion mode after this ACK */
	u8 cwnd;			/* Congestion window after this ACK */
	u8 ssthresh;			/* Slow-start threshold after this ACK */
	u8 dup_acks;			/* Duplicate-ACK count after this ACK */
	u8 cumulative_acks;		/* Cumulative-ACK count after this ACK */
};
696
697
698
699
/*
 * Commands conveyed through sendmsg() control data.
 */
enum rxrpc_command {
	RXRPC_CMD_SEND_DATA,		/* Transmit data */
	RXRPC_CMD_SEND_ABORT,		/* Abort the call (abort code in msg) */
	RXRPC_CMD_ACCEPT,		/* Accept an incoming call */
	RXRPC_CMD_REJECT_BUSY,		/* Reject an incoming call as busy */
};
706
/*
 * Per-call parameters extracted from a sendmsg() request.
 */
struct rxrpc_call_params {
	s64 tx_total_len;		/* Total length of the data to transmit */
	unsigned long user_call_ID;	/* User's call identifier */
	struct {
		u32 hard;		/* Hard (maximum lifetime) timeout */
		u32 idle;		/* Idle (no request data) timeout */
		u32 normal;		/* Normal (no activity) timeout */
	} timeouts;
	u8 nr_timeouts;			/* How many of the above were supplied */
	bool intr;			/* The call is interruptible */
};

/*
 * Everything parsed out of a sendmsg() request.
 */
struct rxrpc_send_params {
	struct rxrpc_call_params call;	/* Parameters for a new call */
	u32 abort_code;			/* Abort code, for RXRPC_CMD_SEND_ABORT */
	enum rxrpc_command command : 8;	/* The command requested */
	bool exclusive;			/* Exclusive connection requested */
	bool upgrade;			/* Service upgrade requested */
};
726
727#include <trace/events/rxrpc.h>
728
729
730
731
732extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
733extern struct workqueue_struct *rxrpc_workqueue;
734
735
736
737
738int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
739void rxrpc_discard_prealloc(struct rxrpc_sock *);
740struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
741 struct rxrpc_sock *,
742 struct sk_buff *);
743void rxrpc_accept_incoming_calls(struct rxrpc_local *);
744struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
745 rxrpc_notify_rx_t);
746int rxrpc_reject_call(struct rxrpc_sock *);
747
748
749
750
751void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool, bool,
752 enum rxrpc_propose_ack_trace);
753void rxrpc_process_call(struct work_struct *);
754
/*
 * Bring the call's timer forward to expire_at if that is earlier than
 * its current expiry, emitting a timer tracepoint for the reason.
 */
static inline void rxrpc_reduce_call_timer(struct rxrpc_call *call,
					   unsigned long expire_at,
					   unsigned long now,
					   enum rxrpc_timer_trace why)
{
	trace_rxrpc_timer(call, why, now);
	timer_reduce(&call->timer, expire_at);
}
763
764
765
766
767extern const char *const rxrpc_call_states[];
768extern const char *const rxrpc_call_completions[];
769extern unsigned int rxrpc_max_call_lifetime;
770extern struct kmem_cache *rxrpc_call_jar;
771
772struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
773struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t, unsigned int);
774struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
775 struct rxrpc_conn_parameters *,
776 struct sockaddr_rxrpc *,
777 struct rxrpc_call_params *, gfp_t,
778 unsigned int);
779void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
780 struct sk_buff *);
781void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
782void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
783bool __rxrpc_queue_call(struct rxrpc_call *);
784bool rxrpc_queue_call(struct rxrpc_call *);
785void rxrpc_see_call(struct rxrpc_call *);
786void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
787void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
788void rxrpc_cleanup_call(struct rxrpc_call *);
789void rxrpc_destroy_all_calls(struct rxrpc_net *);
790
791static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
792{
793 return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags);
794}
795
796static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
797{
798 return !rxrpc_is_service_call(call);
799}
800
801
802
803
804static inline bool __rxrpc_set_call_completion(struct rxrpc_call *call,
805 enum rxrpc_call_completion compl,
806 u32 abort_code,
807 int error)
808{
809 if (call->state < RXRPC_CALL_COMPLETE) {
810 call->abort_code = abort_code;
811 call->error = error;
812 call->completion = compl,
813 call->state = RXRPC_CALL_COMPLETE;
814 trace_rxrpc_call_complete(call);
815 wake_up(&call->waitq);
816 return true;
817 }
818 return false;
819}
820
/*
 * Locked wrapper around __rxrpc_set_call_completion(): takes
 * call->state_lock around the state transition.
 * Returns true if the call was completed by this invocation.
 */
static inline bool rxrpc_set_call_completion(struct rxrpc_call *call,
					     enum rxrpc_call_completion compl,
					     u32 abort_code,
					     int error)
{
	bool ret;

	write_lock_bh(&call->state_lock);
	ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
	write_unlock_bh(&call->state_lock);
	return ret;
}
833
834
835
836
/*
 * Record successful completion of a call.  Caller must hold
 * call->state_lock; returns true if completion happened here.
 */
static inline bool __rxrpc_call_completed(struct rxrpc_call *call)
{
	return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
}

/*
 * Locked wrapper for __rxrpc_call_completed().
 */
static inline bool rxrpc_call_completed(struct rxrpc_call *call)
{
	bool ret;

	write_lock_bh(&call->state_lock);
	ret = __rxrpc_call_completed(call);
	write_unlock_bh(&call->state_lock);
	return ret;
}
851
852
853
854
/*
 * Record that a call is locally aborted, tracing the abort.  Caller must
 * hold call->state_lock; returns true if the call was completed here.
 */
static inline bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
				      rxrpc_seq_t seq,
				      u32 abort_code, int error)
{
	trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq,
			  abort_code, error);
	return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
					   abort_code, error);
}

/*
 * Locked wrapper for __rxrpc_abort_call().
 */
static inline bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
				    rxrpc_seq_t seq, u32 abort_code, int error)
{
	bool ret;

	write_lock_bh(&call->state_lock);
	ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
	write_unlock_bh(&call->state_lock);
	return ret;
}
875
876
877
878
/*
 * Abort a call due to a protocol error in a received packet, tracing the
 * specific protocol violation.  The error reported to the caller is
 * always -EPROTO.  Use the rxrpc_abort_eproto() macro below so that
 * eproto_why is registered as a tracepoint string.
 */
static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
					struct sk_buff *skb,
					const char *eproto_why,
					const char *why,
					u32 abort_code)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	trace_rxrpc_rx_eproto(call, sp->hdr.serial, eproto_why);
	return rxrpc_abort_call(why, call, sp->hdr.seq, abort_code, -EPROTO);
}

#define rxrpc_abort_eproto(call, skb, eproto_why, abort_why, abort_code) \
	__rxrpc_abort_eproto((call), (skb), tracepoint_string(eproto_why), \
			     (abort_why), (abort_code))
894
895
896
897
898extern unsigned int rxrpc_max_client_connections;
899extern unsigned int rxrpc_reap_client_connections;
900extern unsigned long rxrpc_conn_idle_client_expiry;
901extern unsigned long rxrpc_conn_idle_client_fast_expiry;
902extern struct idr rxrpc_client_conn_ids;
903
904void rxrpc_destroy_client_conn_ids(void);
905int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *,
906 struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *,
907 gfp_t);
908void rxrpc_expose_client_call(struct rxrpc_call *);
909void rxrpc_disconnect_client_call(struct rxrpc_call *);
910void rxrpc_put_client_conn(struct rxrpc_connection *);
911void rxrpc_discard_expired_client_conns(struct work_struct *);
912void rxrpc_destroy_all_client_connections(struct rxrpc_net *);
913void rxrpc_clean_up_local_conns(struct rxrpc_local *);
914
915
916
917
918void rxrpc_process_connection(struct work_struct *);
919
920
921
922
923extern unsigned int rxrpc_connection_expiry;
924extern unsigned int rxrpc_closed_conn_expiry;
925
926struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
927struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
928 struct sk_buff *,
929 struct rxrpc_peer **);
930void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
931void rxrpc_disconnect_call(struct rxrpc_call *);
932void rxrpc_kill_connection(struct rxrpc_connection *);
933bool rxrpc_queue_conn(struct rxrpc_connection *);
934void rxrpc_see_connection(struct rxrpc_connection *);
935void rxrpc_get_connection(struct rxrpc_connection *);
936struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *);
937void rxrpc_put_service_conn(struct rxrpc_connection *);
938void rxrpc_service_connection_reaper(struct work_struct *);
939void rxrpc_destroy_all_connections(struct rxrpc_net *);
940
941static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
942{
943 return conn->out_clientflag;
944}
945
946static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
947{
948 return !rxrpc_conn_is_client(conn);
949}
950
/*
 * Drop a reference on a connection, dispatching to the client- or
 * service-specific put routine.  NULL is tolerated as a no-op.
 */
static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	if (!conn)
		return;

	if (rxrpc_conn_is_client(conn))
		rxrpc_put_client_conn(conn);
	else
		rxrpc_put_service_conn(conn);
}

/*
 * Bring the connection's timer forward to expire_at if that is earlier
 * than its current expiry.
 */
static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn,
					   unsigned long expire_at)
{
	timer_reduce(&conn->timer, expire_at);
}
967
968
969
970
971struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
972 struct sk_buff *);
973struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t);
974void rxrpc_new_incoming_connection(struct rxrpc_sock *,
975 struct rxrpc_connection *, struct sk_buff *);
976void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
977
978
979
980
981int rxrpc_input_packet(struct sock *, struct sk_buff *);
982
983
984
985
986extern const struct rxrpc_security rxrpc_no_security;
987
988
989
990
991extern struct key_type key_type_rxrpc;
992extern struct key_type key_type_rxrpc_s;
993
994int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
995int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
996int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time64_t,
997 u32);
998
999
1000
1001
1002extern void rxrpc_process_local_events(struct rxrpc_local *);
1003
1004
1005
1006
1007struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *);
1008struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
1009struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
1010void rxrpc_put_local(struct rxrpc_local *);
1011struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *);
1012void rxrpc_unuse_local(struct rxrpc_local *);
1013void rxrpc_queue_local(struct rxrpc_local *);
1014void rxrpc_destroy_all_locals(struct rxrpc_net *);
1015
1016
1017
1018
1019extern unsigned int rxrpc_max_backlog __read_mostly;
1020extern unsigned long rxrpc_requested_ack_delay;
1021extern unsigned long rxrpc_soft_ack_delay;
1022extern unsigned long rxrpc_idle_ack_delay;
1023extern unsigned int rxrpc_rx_window_size;
1024extern unsigned int rxrpc_rx_mtu;
1025extern unsigned int rxrpc_rx_jumbo_max;
1026extern unsigned long rxrpc_resend_timeout;
1027
1028extern const s8 rxrpc_ack_priority[];
1029
1030
1031
1032
1033extern unsigned int rxrpc_net_id;
1034extern struct pernet_operations rxrpc_net_ops;
1035
/*
 * Obtain the rxrpc per-network-namespace state for a given namespace.
 */
static inline struct rxrpc_net *rxrpc_net(struct net *net)
{
	return net_generic(net, rxrpc_net_id);
}
1040
1041
1042
1043
1044int rxrpc_send_ack_packet(struct rxrpc_call *, bool, rxrpc_serial_t *);
1045int rxrpc_send_abort_packet(struct rxrpc_call *);
1046int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
1047void rxrpc_reject_packets(struct rxrpc_local *);
1048void rxrpc_send_keepalive(struct rxrpc_peer *);
1049
1050
1051
1052
1053void rxrpc_error_report(struct sock *);
1054void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
1055 rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
1056void rxrpc_peer_keepalive_worker(struct work_struct *);
1057
1058
1059
1060
1061struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
1062 const struct sockaddr_rxrpc *);
1063struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *, struct rxrpc_local *,
1064 struct sockaddr_rxrpc *, gfp_t);
1065struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
1066void rxrpc_new_incoming_peer(struct rxrpc_sock *, struct rxrpc_local *,
1067 struct rxrpc_peer *);
1068void rxrpc_destroy_all_peers(struct rxrpc_net *);
1069struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
1070struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
1071void rxrpc_put_peer(struct rxrpc_peer *);
1072void rxrpc_put_peer_locked(struct rxrpc_peer *);
1073
1074
1075
1076
1077extern const struct seq_operations rxrpc_call_seq_ops;
1078extern const struct seq_operations rxrpc_connection_seq_ops;
1079extern const struct seq_operations rxrpc_peer_seq_ops;
1080
1081
1082
1083
1084void rxrpc_notify_socket(struct rxrpc_call *);
1085int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);
1086
1087
1088
1089
1090#ifdef CONFIG_RXKAD
1091extern const struct rxrpc_security rxkad;
1092#endif
1093
1094
1095
1096
1097int __init rxrpc_init_security(void);
1098void rxrpc_exit_security(void);
1099int rxrpc_init_client_conn_security(struct rxrpc_connection *);
1100int rxrpc_init_server_conn_security(struct rxrpc_connection *);
1101
1102
1103
1104
1105int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);
1106
1107
1108
1109
1110void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
1111void rxrpc_packet_destructor(struct sk_buff *);
1112void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
1113void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
1114void rxrpc_eaten_skb(struct sk_buff *, enum rxrpc_skb_trace);
1115void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
1116void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
1117void rxrpc_purge_queue(struct sk_buff_head *);
1118
1119
1120
1121
/*
 * sysctl.c - stubbed out to no-ops when sysctl support is not built in.
 */
#ifdef CONFIG_SYSCTL
extern int __init rxrpc_sysctl_init(void);
extern void rxrpc_sysctl_exit(void);
#else
static inline int __init rxrpc_sysctl_init(void) { return 0; }
static inline void rxrpc_sysctl_exit(void) {}
#endif
1129
1130
1131
1132
1133int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);
1134
1135static inline bool before(u32 seq1, u32 seq2)
1136{
1137 return (s32)(seq1 - seq2) < 0;
1138}
1139static inline bool before_eq(u32 seq1, u32 seq2)
1140{
1141 return (s32)(seq1 - seq2) <= 0;
1142}
1143static inline bool after(u32 seq1, u32 seq2)
1144{
1145 return (s32)(seq1 - seq2) > 0;
1146}
1147static inline bool after_eq(u32 seq1, u32 seq2)
1148{
1149 return (s32)(seq1 - seq2) >= 0;
1150}
1151
1152
1153
1154
1155extern unsigned int rxrpc_debug;
1156
/*
 * Debug printing helpers: prefix each message with the current task's
 * comm.  kenter/kleave mark function entry/exit; kdebug, kproto and
 * knet tag general, protocol and network messages respectively.
 */
#define dbgprintk(FMT,...) \
	printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)

#define kenter(FMT,...)	dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define kleave(FMT,...)	dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define kdebug(FMT,...)	dbgprintk("    "FMT ,##__VA_ARGS__)
#define kproto(FMT,...)	dbgprintk("### "FMT ,##__VA_ARGS__)
#define knet(FMT,...)	dbgprintk("@@@ "FMT ,##__VA_ARGS__)
1165
1166
/*
 * _enter/_leave/_debug/_proto/_net selection:
 *  - __KDEBUG: always print;
 *  - CONFIG_AF_RXRPC_DEBUG: print when the matching bit is set in the
 *    rxrpc_debug mask (runtime-tunable);
 *  - otherwise: compile to no_printk() so the format strings are still
 *    type-checked but produce no output.
 */
#if defined(__KDEBUG)
#define _enter(FMT,...)	kenter(FMT,##__VA_ARGS__)
#define _leave(FMT,...)	kleave(FMT,##__VA_ARGS__)
#define _debug(FMT,...)	kdebug(FMT,##__VA_ARGS__)
#define _proto(FMT,...)	kproto(FMT,##__VA_ARGS__)
#define _net(FMT,...)	knet(FMT,##__VA_ARGS__)

#elif defined(CONFIG_AF_RXRPC_DEBUG)
/* Bits in the rxrpc_debug mask selecting each message class */
#define RXRPC_DEBUG_KENTER	0x01
#define RXRPC_DEBUG_KLEAVE	0x02
#define RXRPC_DEBUG_KDEBUG	0x04
#define RXRPC_DEBUG_KPROTO	0x08
#define RXRPC_DEBUG_KNET	0x10

#define _enter(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER))	\
		kenter(FMT,##__VA_ARGS__);		\
} while (0)

#define _leave(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE))	\
		kleave(FMT,##__VA_ARGS__);		\
} while (0)

#define _debug(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG))	\
		kdebug(FMT,##__VA_ARGS__);		\
} while (0)

#define _proto(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO))	\
		kproto(FMT,##__VA_ARGS__);		\
} while (0)

#define _net(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET))	\
		knet(FMT,##__VA_ARGS__);		\
} while (0)

#else
#define _enter(FMT,...)	no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define _leave(FMT,...)	no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define _debug(FMT,...)	no_printk("    "FMT ,##__VA_ARGS__)
#define _proto(FMT,...)	no_printk("### "FMT ,##__VA_ARGS__)
#define _net(FMT,...)	no_printk("@@@ "FMT ,##__VA_ARGS__)
#endif
1218
1219
1220
1221
/*
 * Debug assertion macros.  Enabled by the "#if 1" below; flipping it to
 * "#if 0" compiles all four to empty statements.  The CMP variants cast
 * Y to the type of X and print both operands on failure; all variants
 * BUG() when the assertion does not hold.
 */
#if 1

#define ASSERT(X)						\
do {								\
	if (unlikely(!(X))) {					\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely(!(_x OP _y))) {					\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)						\
do {								\
	if (unlikely((C) && !(X))) {				\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely((C) && !(_x OP _y))) {				\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)				\
do {						\
} while (0)

#define ASSERTCMP(X, OP, Y)			\
do {						\
} while (0)

#define ASSERTIF(C, X)				\
do {						\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)		\
do {						\
} while (0)

#endif
1283