1
2
3
4
5
6
7
8
9
10
11
12#include <linux/atomic.h>
13#include <linux/seqlock.h>
14#include <net/sock.h>
15#include <net/af_rxrpc.h>
16#include <rxrpc/packet.h>
17
/*
 * Optional debug check that a slab object containing an atomic_t has not
 * been freed and poisoned underneath us.  Disabled via "#if 0"; the live
 * definition is a no-op.
 * NOTE(review): the shift count "sizeof(atomic_t) - 2" in the disabled
 * branch looks like it may have been intended as a bit count rather than
 * a byte count - confirm before ever enabling this branch.
 */
#if 0
#define CHECK_SLAB_OKAY(X) \
	BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
	       (POISON_FREE << 8 | POISON_FREE))
#else
#define CHECK_SLAB_OKAY(X) do {} while (0)
#endif
25
#define FCRYPT_BSIZE 8	/* fcrypt cipher block size in bytes */

/*
 * An FCRYPT_BSIZE-byte crypto blob, viewable either as raw bytes or as two
 * 32-bit big-endian words; 8-byte aligned for in-place cipher use.
 */
struct rxrpc_crypt {
	union {
		u8	x[FCRYPT_BSIZE];
		__be32	n[2];
	};
} __attribute__((aligned(8)));
33
/* Queue work items on the rxrpc-private workqueue rather than a system one */
#define rxrpc_queue_work(WS)	queue_work(rxrpc_workqueue, (WS))
#define rxrpc_queue_delayed_work(WS,D)	\
	queue_delayed_work(rxrpc_workqueue, (WS), (D))

struct rxrpc_connection;	/* forward declaration */
39
40
41
42
/*
 * Marks stamped on socket buffers to say what sort of event each conveys
 * (presumably carried in skb->mark - confirm against the producers).
 */
enum rxrpc_skb_mark {
	RXRPC_SKB_MARK_DATA,		/* data message */
	RXRPC_SKB_MARK_FINAL_ACK,	/* final ACK */
	RXRPC_SKB_MARK_BUSY,		/* server-busy indication */
	RXRPC_SKB_MARK_REMOTE_ABORT,	/* abort issued by the remote peer */
	RXRPC_SKB_MARK_LOCAL_ABORT,	/* abort generated locally */
	RXRPC_SKB_MARK_NET_ERROR,	/* network error */
	RXRPC_SKB_MARK_LOCAL_ERROR,	/* local error */
	RXRPC_SKB_MARK_NEW_CALL,	/* new incoming call notification */
};
53
54
55
56
/*
 * States in an rxrpc socket's bind/listen lifecycle.
 */
enum {
	RXRPC_UNBOUND = 0,		/* not yet bound */
	RXRPC_CLIENT_UNBOUND,		/* client-mode, not yet bound */
	RXRPC_CLIENT_BOUND,		/* client bound to a local endpoint */
	RXRPC_SERVER_BOUND,		/* server bound to a local endpoint */
	RXRPC_SERVER_LISTENING,		/* server listening for incoming calls */
	RXRPC_SERVER_LISTEN_DISABLED,	/* listening disabled */
	RXRPC_CLOSE,			/* socket being closed */
};
66
67
68
69
70
71
72
73
74
/*
 * Preallocated peers, connections and calls for servicing incoming calls
 * (see rxrpc_service_prealloc()).  Each head/tail pair indexes its array
 * ring-buffer fashion.
 */
struct rxrpc_backlog {
	unsigned short		peer_backlog_head;
	unsigned short		peer_backlog_tail;
	unsigned short		conn_backlog_head;
	unsigned short		conn_backlog_tail;
	unsigned short		call_backlog_head;
	unsigned short		call_backlog_tail;
#define RXRPC_BACKLOG_MAX	32	/* ring capacity per object type */
	struct rxrpc_peer	*peer_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_connection	*conn_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_call	*call_backlog[RXRPC_BACKLOG_MAX];
};
87
88
89
90
/*
 * An rxrpc socket: the core struct sock plus rxrpc-specific state.
 */
struct rxrpc_sock {
	struct sock		sk;		/* base network socket */
	rxrpc_notify_new_call_t	notify_new_call; /* func to notify of new call */
	rxrpc_discard_new_call_t discard_new_call; /* func to discard a new call */
	struct rxrpc_local	*local;		/* local endpoint */
	struct rxrpc_backlog	*backlog;	/* preallocation for incoming calls */
	spinlock_t		incoming_lock;	/* incoming call handling lock */
	struct list_head	sock_calls;	/* calls owned by this socket */
	struct list_head	to_be_accepted;	/* calls awaiting acceptance */
	struct list_head	recvmsg_q;	/* calls awaiting recvmsg */
	rwlock_t		recvmsg_lock;	/* lock for recvmsg_q */
	struct key		*key;		/* security key for this socket */
	struct key		*securities;	/* server security keyring */
	struct rb_root		calls;		/* calls indexed by user_call_ID */
	unsigned long		flags;
#define RXRPC_SOCK_CONNECTED		0	/* connect_srx is set */
	rwlock_t		call_lock;	/* lock for the calls tree */
	u32			min_sec_level;	/* minimum acceptable security level */
#define RXRPC_SECURITY_MAX	RXRPC_SECURITY_ENCRYPT
	bool			exclusive;	/* exclusive connection mode */
	sa_family_t		family;		/* address family of srx */
	struct sockaddr_rxrpc	srx;		/* local address */
	struct sockaddr_rxrpc	connect_srx;	/* default connect address */
};

/* Map a struct sock back to its containing rxrpc socket */
#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)
118
119
120
121
/*
 * RxRPC packet header in host byte order.  Field names mirror the on-wire
 * rxrpc header; __packed to preserve exact layout.
 */
struct rxrpc_host_header {
	u32		epoch;		/* client boot timestamp / epoch */
	u32		cid;		/* connection and channel ID */
	u32		callNumber;	/* call ID (0 for connection-level packets) */
	u32		seq;		/* sequence number of packet in call */
	u32		serial;		/* serial number of packet on connection */
	u8		type;		/* packet type */
	u8		flags;		/* packet flags */
	u8		userStatus;	/* app-layer defined status */
	u8		securityIndex;	/* security protocol ID */
	union {
		u16	_rsvd;		/* reserved */
		u16	cksum;		/* kerberos security checksum */
	};
	u16		serviceId;	/* service ID */
} __packed;
138
139
140
141
142
/*
 * RxRPC-private data kept in each sk_buff's control buffer (skb->cb),
 * as mapped by the rxrpc_skb() macro below.
 */
struct rxrpc_skb_priv {
	union {
		u8		nr_jumbo;	/* jumbo subpacket count (name-derived; confirm) */
	};
	union {
		int		remain;		/* remaining space (name-derived; confirm) */
	};

	struct rxrpc_host_header hdr;		/* RxRPC packet header of this packet */
};

#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
155
156
157
158
/*
 * Table of operations implementing one RxRPC security model (e.g. rxkad).
 */
struct rxrpc_security {
	const char		*name;		/* name of this security model */
	u8			security_index;	/* security type it provides */

	/* Initialise the security service */
	int (*init)(void);

	/* Clean up the security service */
	void (*exit)(void);

	/* Initialise a connection's security */
	int (*init_connection_security)(struct rxrpc_connection *);

	/* Prime a connection's packet security */
	int (*prime_packet_security)(struct rxrpc_connection *);

	/* Impose security on an outgoing packet */
	int (*secure_packet)(struct rxrpc_call *,
			     struct sk_buff *,
			     size_t,
			     void *);

	/* Verify the security on a received packet */
	int (*verify_packet)(struct rxrpc_call *, struct sk_buff *,
			     unsigned int, unsigned int, rxrpc_seq_t, u16);

	/* Locate the data within a received packet */
	void (*locate_data)(struct rxrpc_call *, struct sk_buff *,
			    unsigned int *, unsigned int *);

	/* Issue a security challenge on a connection */
	int (*issue_challenge)(struct rxrpc_connection *);

	/* Respond to a received security challenge */
	int (*respond_to_challenge)(struct rxrpc_connection *,
				    struct sk_buff *,
				    u32 *);

	/* Verify a response to our challenge */
	int (*verify_response)(struct rxrpc_connection *,
			       struct sk_buff *,
			       u32 *);

	/* Clear connection security state */
	void (*clear)(struct rxrpc_connection *);
};
205
206
207
208
209
210
/*
 * An RxRPC local transport endpoint, wrapping the underlying kernel
 * transport socket bound to a local address.
 */
struct rxrpc_local {
	struct rcu_head		rcu;		/* for RCU-deferred destruction */
	atomic_t		usage;		/* reference count */
	struct list_head	link;		/* link in endpoint list (confirm owner) */
	struct socket		*socket;	/* underlying transport socket */
	struct work_struct	processor;	/* event processor work item */
	struct rxrpc_sock __rcu	*service;	/* service socket bound here */
	struct rw_semaphore	defrag_sem;	/* control re-enablement of IP DF bit (confirm) */
	struct sk_buff_head	reject_queue;	/* packets awaiting rejection */
	struct sk_buff_head	event_queue;	/* endpoint-level event packets */
	struct rb_root		client_conns;	/* client connections by connection parameters */
	spinlock_t		client_conns_lock; /* lock for client_conns */
	spinlock_t		lock;		/* access lock */
	rwlock_t		services_lock;	/* lock for service pointer */
	int			debug_id;	/* debug ID for printks */
	bool			dead;		/* endpoint being torn down */
	struct sockaddr_rxrpc	srx;		/* local address */
};
229
230
231
232
233
/*
 * An RxRPC remote transport endpoint (peer), including RTT tracking and
 * MTU state for that destination.
 */
struct rxrpc_peer {
	struct rcu_head		rcu;		/* for RCU-deferred destruction */
	atomic_t		usage;		/* reference count */
	unsigned long		hash_key;	/* key into the peer hash table */
	struct hlist_node	hash_link;	/* link in the peer hash table */
	struct rxrpc_local	*local;		/* local endpoint this peer is reached via */
	struct hlist_head	error_targets;	/* calls to notify on network error */
	struct work_struct	error_distributor; /* work item distributing errors to calls */
	struct rb_root		service_conns;	/* service connections to this peer */
	seqlock_t		service_conn_lock; /* lock for service_conns */
	spinlock_t		lock;		/* access lock */
	unsigned int		if_mtu;		/* interface MTU */
	unsigned int		mtu;		/* effective network MTU for this peer */
	unsigned int		maxdata;	/* data size given the MTU (confirm derivation) */
	unsigned short		hdrsize;	/* header size (IP + UDP + RxRPC; confirm) */
	int			debug_id;	/* debug ID for printks */
	int			error_report;	/* last network error reported */
#define RXRPC_LOCAL_ERROR_OFFSET 1000000	/* offset distinguishing local errors (confirm) */
	struct sockaddr_rxrpc	srx;		/* remote address */

	/* Round-trip-time calculation: a small ring of recent samples plus a
	 * running sum for averaging. */
#define RXRPC_RTT_CACHE_SIZE 32
	ktime_t			rtt_last_req;	/* time of last RTT request */
	u64			rtt;		/* current RTT estimate (unit not visible here) */
	u64			rtt_sum;	/* sum of cached RTT samples */
	u64			rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* ring of RTT samples */
	u8			rtt_cursor;	/* next sample slot to fill */
	u8			rtt_usage;	/* number of valid samples cached */
};
263
264
265
266
/*
 * Protocol-level identity of a connection.  epoch+cid overlay index_key so
 * the pair can be compared/hashed as a single u64.
 */
struct rxrpc_conn_proto {
	union {
		struct {
			u32	epoch;	/* epoch of this connection */
			u32	cid;	/* connection ID */
		};
		u64	index_key;	/* combined lookup key */
	};
};
276
/*
 * Parameters by which connections are created and looked up.
 */
struct rxrpc_conn_parameters {
	struct rxrpc_local	*local;		/* local endpoint */
	struct rxrpc_peer	*peer;		/* remote endpoint */
	struct key		*key;		/* security details */
	bool			exclusive;	/* do not reuse this connection */
	u16			service_id;	/* service ID for this connection */
	u32			security_level;	/* security level selected */
};
285
286
287
288
/*
 * Bits for a connection's ->flags word.
 */
enum rxrpc_conn_flag {
	RXRPC_CONN_HAS_IDR,		/* has a client conn ID assigned in the IDR */
	RXRPC_CONN_IN_SERVICE_CONNS,	/* indexed in the peer's service_conns tree */
	RXRPC_CONN_IN_CLIENT_CONNS,	/* indexed in the local's client_conns tree */
	RXRPC_CONN_EXPOSED,		/* connection ID exposed (confirm semantics) */
	RXRPC_CONN_DONT_REUSE,		/* do not reuse this connection */
	RXRPC_CONN_COUNTED,		/* counted against a connection limit (confirm) */
};
297
298
299
300
/*
 * Events that can be raised on a connection (bits in ->events).
 */
enum rxrpc_conn_event {
	RXRPC_CONN_EV_CHALLENGE,	/* send a security challenge */
};
304
305
306
307
/*
 * Caching states of a client connection.
 */
enum rxrpc_conn_cache_state {
	RXRPC_CONN_CLIENT_INACTIVE,	/* not on any list, usable or not */
	RXRPC_CONN_CLIENT_WAITING,	/* waiting to become active */
	RXRPC_CONN_CLIENT_ACTIVE,	/* active, carrying calls */
	RXRPC_CONN_CLIENT_CULLED,	/* culled from the cache */
	RXRPC_CONN_CLIENT_IDLE,		/* idle, awaiting reuse or expiry */
	RXRPC_CONN__NR_CACHE_STATES
};
316
317
318
319
/*
 * Protocol states of a connection.
 */
enum rxrpc_conn_proto_state {
	RXRPC_CONN_UNUSED,		/* not yet attempted */
	RXRPC_CONN_CLIENT,		/* client connection */
	RXRPC_CONN_SERVICE_PREALLOC,	/* preallocated service connection */
	RXRPC_CONN_SERVICE_UNSECURED,	/* service, awaiting security setup */
	RXRPC_CONN_SERVICE_CHALLENGING,	/* service, security challenge in flight */
	RXRPC_CONN_SERVICE,		/* fully operational service connection */
	RXRPC_CONN_REMOTELY_ABORTED,	/* aborted by the remote peer */
	RXRPC_CONN_LOCALLY_ABORTED,	/* aborted locally */
	RXRPC_CONN__NR_STATES
};
331
332
333
334
335
336
/*
 * An RxRPC connection.  Carries up to RXRPC_MAXCALLS concurrent calls,
 * one per channel.
 */
struct rxrpc_connection {
	struct rxrpc_conn_proto	proto;		/* protocol-level identity (epoch + cid) */
	struct rxrpc_conn_parameters params;	/* peer, key and service parameters */

	atomic_t		usage;		/* reference count */
	struct rcu_head		rcu;		/* for RCU-deferred destruction */
	struct list_head	cache_link;	/* link in client-connection cache */

	spinlock_t		channel_lock;	/* guards the channels and their calls */
	unsigned char		active_chans;	/* bitmask of channels carrying calls */
#define RXRPC_ACTIVE_CHANS_MASK	((1 << RXRPC_MAXCALLS) - 1)
	struct list_head	waiting_calls;	/* calls waiting for a free channel */
	struct rxrpc_channel {
		struct rxrpc_call __rcu	*call;		/* call currently on this channel */
		u32			call_id;	/* ID of the current call */
		u32			call_counter;	/* call ID counter */
		u32			last_call;	/* ID of the last call on this channel */
		u8			last_type;	/* type of last packet (confirm) */
		u16			last_service_id; /* service ID of last call */
		union {
			u32		last_seq;	/* last sequence seen */
			u32		last_abort;	/* last abort code */
		};
	} channels[RXRPC_MAXCALLS];

	struct work_struct	processor;	/* connection event processor */
	union {
		struct rb_node	client_node;	/* node in local->client_conns */
		struct rb_node	service_node;	/* node in peer->service_conns */
	};
	struct list_head	proc_link;	/* link in the /proc listing */
	struct list_head	link;		/* link in master connection list */
	struct sk_buff_head	rx_queue;	/* received connection-level packets */
	const struct rxrpc_security *security;	/* security model applied */
	struct key		*server_key;	/* security key for this service */
	struct crypto_skcipher	*cipher;	/* cipher handle (confirm: checksum IV generation) */
	struct rxrpc_crypt	csum_iv;	/* packet checksum base */
	unsigned long		flags;		/* RXRPC_CONN_* flag bits */
	unsigned long		events;		/* RXRPC_CONN_EV_* event bits */
	unsigned long		idle_timestamp;	/* time the connection last went idle */
	spinlock_t		state_lock;	/* state-change lock */
	enum rxrpc_conn_cache_state cache_state; /* client cache state */
	enum rxrpc_conn_proto_state state;	/* current protocol state */
	u32			local_abort;	/* local abort code */
	u32			remote_abort;	/* remote abort code */
	int			debug_id;	/* debug ID for printks */
	atomic_t		serial;		/* packet serial number counter */
	unsigned int		hi_serial;	/* highest serial number received */
	u32			security_nonce;	/* security nonce (response reuse preventer?) */
	u8			size_align;	/* data size alignment */
	u8			security_size;	/* security header size */
	u8			security_ix;	/* security type */
	u8			out_clientflag;	/* non-zero when we are the client side */
};
391
392
393
394
/*
 * Bits for a call's ->flags word.
 */
enum rxrpc_call_flag {
	RXRPC_CALL_RELEASED,		/* call released from its socket */
	RXRPC_CALL_HAS_USERID,		/* has a user ID in the socket's calls tree */
	RXRPC_CALL_IS_SERVICE,		/* call is a service (server-side) call */
	RXRPC_CALL_EXPOSED,		/* call ID exposed (confirm semantics) */
	RXRPC_CALL_RX_LAST,		/* last packet of the Rx phase received */
	RXRPC_CALL_TX_LAST,		/* last packet of the Tx phase transmitted */
	RXRPC_CALL_SEND_PING,		/* a ping should be sent */
	RXRPC_CALL_PINGING,		/* a ping is in flight */
	RXRPC_CALL_RETRANS_TIMEOUT,	/* retransmission due to timeout occurred */
};
406
407
408
409
/*
 * Events that can be raised on a call (bits in ->events).
 */
enum rxrpc_call_event {
	RXRPC_CALL_EV_ACK,		/* need to generate an ACK */
	RXRPC_CALL_EV_ABORT,		/* need to generate an abort */
	RXRPC_CALL_EV_TIMER,		/* the timer expired */
	RXRPC_CALL_EV_RESEND,		/* Tx resend required */
	RXRPC_CALL_EV_PING,		/* ping required */
};
417
418
419
420
/*
 * States through which a call progresses.
 */
enum rxrpc_call_state {
	RXRPC_CALL_UNINITIALISED,	/* not yet set up */
	RXRPC_CALL_CLIENT_AWAIT_CONN,	/* client waiting for a connection/channel */
	RXRPC_CALL_CLIENT_SEND_REQUEST,	/* client sending the request */
	RXRPC_CALL_CLIENT_AWAIT_REPLY,	/* client request sent, awaiting reply */
	RXRPC_CALL_CLIENT_RECV_REPLY,	/* client receiving the reply */
	RXRPC_CALL_SERVER_PREALLOC,	/* preallocated service call */
	RXRPC_CALL_SERVER_SECURING,	/* server securing the connection */
	RXRPC_CALL_SERVER_ACCEPTING,	/* server awaiting acceptance */
	RXRPC_CALL_SERVER_RECV_REQUEST,	/* server receiving the request */
	RXRPC_CALL_SERVER_ACK_REQUEST,	/* server pending ACK of the request */
	RXRPC_CALL_SERVER_SEND_REPLY,	/* server sending the reply */
	RXRPC_CALL_SERVER_AWAIT_ACK,	/* server reply sent, awaiting final ACK */
	RXRPC_CALL_COMPLETE,		/* call complete - see ->completion for how */
	NR__RXRPC_CALL_STATES
};
437
438
439
440
/*
 * How a call reached RXRPC_CALL_COMPLETE.
 */
enum rxrpc_call_completion {
	RXRPC_CALL_SUCCEEDED,		/* completed successfully */
	RXRPC_CALL_REMOTELY_ABORTED,	/* aborted by the remote peer */
	RXRPC_CALL_LOCALLY_ABORTED,	/* aborted locally */
	RXRPC_CALL_LOCAL_ERROR,		/* killed by a local error */
	RXRPC_CALL_NETWORK_ERROR,	/* killed by a network error */
	NR__RXRPC_CALL_COMPLETIONS
};
449
450
451
452
/*
 * Congestion management modes (TCP-style congestion control states).
 */
enum rxrpc_congest_mode {
	RXRPC_CALL_SLOW_START,		/* exponential window growth */
	RXRPC_CALL_CONGEST_AVOIDANCE,	/* linear window growth */
	RXRPC_CALL_PACKET_LOSS,		/* reacting to packet loss */
	RXRPC_CALL_FAST_RETRANSMIT,	/* fast-retransmit recovery */
	NR__RXRPC_CONGEST_MODES
};
460
461
462
463
464
/*
 * An RxRPC call: one RPC operation, client- or server-side.
 */
struct rxrpc_call {
	struct rcu_head		rcu;		/* for RCU-deferred destruction */
	struct rxrpc_connection	*conn;		/* connection carrying the call */
	struct rxrpc_peer	*peer;		/* remote endpoint */
	struct rxrpc_sock __rcu	*socket;	/* socket responsible for the call */
	struct mutex		user_mutex;	/* serialises user access to the call */
	ktime_t			ack_at;		/* when a deferred ACK is due */
	ktime_t			resend_at;	/* when the next resend is due */
	ktime_t			ping_at;	/* when the next ping is due */
	ktime_t			expire_at;	/* when the call expires */
	struct timer_list	timer;		/* combined event timer */
	struct work_struct	processor;	/* call event processor work item */
	rxrpc_notify_rx_t	notify_rx;	/* kernel-service Rx notification hook */
	struct list_head	link;		/* link in master call list */
	struct list_head	chan_wait_link;	/* link in conn->waiting_calls */
	struct hlist_node	error_link;	/* link in peer->error_targets */
	struct list_head	accept_link;	/* link in socket acceptance queue */
	struct list_head	recvmsg_link;	/* link in socket recvmsg queue */
	struct list_head	sock_link;	/* link in socket call list */
	struct rb_node		sock_node;	/* node in socket calls tree */
	struct sk_buff		*tx_pending;	/* Tx buffer currently being filled */
	wait_queue_head_t	waitq;		/* waiters on call state (see wake_up below) */
	__be32			crypto_buf[2];	/* temporary crypto buffer */
	unsigned long		user_call_ID;	/* user-assigned call ID */
	unsigned long		flags;		/* RXRPC_CALL_* flag bits */
	unsigned long		events;		/* RXRPC_CALL_EV_* event bits */
	spinlock_t		lock;		/* access lock */
	rwlock_t		state_lock;	/* guards state transitions */
	u32			abort_code;	/* abort code on completion */
	int			error;		/* error code on completion */
	enum rxrpc_call_state	state;		/* current call state */
	enum rxrpc_call_completion completion;	/* how the call completed */
	atomic_t		usage;		/* reference count */
	u16			service_id;	/* service ID */
	u8			security_ix;	/* security type */
	u32			call_id;	/* call ID on the connection */
	u32			cid;		/* connection ID the call sits on */
	int			debug_id;	/* debug ID for printks */
	unsigned short		rx_pkt_offset;	/* current recvmsg packet offset */
	unsigned short		rx_pkt_len;	/* current recvmsg packet length */

	/*
	 * Rx/Tx circular packet buffer plus a byte of annotations per slot
	 * (RXRPC_TX_ANNO_* / RXRPC_RX_ANNO_* bits below).
	 * NOTE(review): actual sizing is determined at the allocation site,
	 * presumably RXRPC_RXTX_BUFF_SIZE entries - confirm.
	 */
#define RXRPC_RXTX_BUFF_SIZE	64
#define RXRPC_RXTX_BUFF_MASK	(RXRPC_RXTX_BUFF_SIZE - 1)
#define RXRPC_INIT_RX_WINDOW_SIZE	32
	struct sk_buff		**rxtx_buffer;	/* ring of Tx/Rx packets */
	u8			*rxtx_annotations; /* per-slot annotation bytes */
#define RXRPC_TX_ANNO_ACK	0	/* Tx slot: acked */
#define RXRPC_TX_ANNO_UNACK	1	/* Tx slot: not yet acked */
#define RXRPC_TX_ANNO_NAK	2	/* Tx slot: negatively acked */
#define RXRPC_TX_ANNO_RETRANS	3	/* Tx slot: marked for retransmission */
#define RXRPC_TX_ANNO_MASK	0x03	/* mask for the ACK-state bits */
#define RXRPC_TX_ANNO_LAST	0x04	/* last packet flag */
#define RXRPC_TX_ANNO_RESENT	0x08	/* packet was resent */

#define RXRPC_RX_ANNO_JUMBO	0x3f	/* jumbo subpacket number, 0 for not jumbo */
#define RXRPC_RX_ANNO_JLAST	0x40	/* last subpacket of a jumbo */
#define RXRPC_RX_ANNO_VERIFIED	0x80	/* packet security-verified */
	rxrpc_seq_t		tx_hard_ack;	/* last Tx seq hard-acked */

	rxrpc_seq_t		tx_top;		/* highest Tx seq queued */

	/*
	 * Congestion control (see enum rxrpc_congest_mode).
	 */
#define RXRPC_TX_SMSS		RXRPC_JUMBO_DATALEN	/* sender max segment size */
	u8			cong_cwnd;	/* congestion window (in packets) */
	u8			cong_extra;	/* extra to send for congestion management */
	u8			cong_ssthresh;	/* slow-start threshold */
	enum rxrpc_congest_mode	cong_mode:8;	/* current congestion mode */
	u8			cong_dup_acks;	/* count of duplicate ACKs seen */
	u8			cong_cumul_acks; /* cumulative ACK count */
	ktime_t			cong_tstamp;	/* last congestion-state change time */

	rxrpc_seq_t		rx_hard_ack;	/* last Rx seq hard-acked/consumed */

	rxrpc_seq_t		rx_top;		/* highest Rx seq received */
	rxrpc_seq_t		rx_expect_next;	/* next expected Rx seq */
	u8			rx_winsize;	/* size of the Rx window */
	u8			tx_winsize;	/* remote's Tx window size (confirm) */
	bool			tx_phase;	/* true if in the transmission phase */
	u8			nr_jumbo_bad;	/* count of bad jumbo packets seen */

	/* ACK generation state */
	u8			ackr_reason;	/* reason for the pending ACK */
	u16			ackr_skew;	/* skew to report in the ACK */
	rxrpc_serial_t		ackr_serial;	/* serial of packet being ACK'd */
	rxrpc_seq_t		ackr_prev_seq;	/* previous sequence number received */
	rxrpc_seq_t		ackr_consumed;	/* highest seq consumed */
	rxrpc_seq_t		ackr_seen;	/* highest seq seen */

	/* Ping state */
	rxrpc_serial_t		ping_serial;	/* serial of the last ping sent */
	ktime_t			ping_time;	/* time the last ping was sent */

	/* ACK-tracking state */
	ktime_t			acks_latest_ts;	/* timestamp of latest ACK received */
	rxrpc_serial_t		acks_latest;	/* serial of latest ACK received */
	rxrpc_seq_t		acks_lowest_nak; /* lowest NAK'd seq in latest ACK */
};
576
577
578
579
/*
 * Summary of a received ACK plus derived congestion-management data,
 * built while parsing the ACK and consumed by congestion control.
 */
struct rxrpc_ack_summary {
	u8			ack_reason;	/* reason given in the ACK */
	u8			nr_acks;	/* number of ACKs in packet */
	u8			nr_nacks;	/* number of NAKs in packet */
	u8			nr_new_acks;	/* number of new ACKs in packet */
	u8			nr_new_nacks;	/* number of new NAKs in packet */
	u8			nr_rot_new_acks; /* number of rotated new ACKs */
	bool			new_low_nack;	/* true if new low NAK found */
	bool			retrans_timeo;	/* true if reTx due to timeout happened */
	u8			flight_size;	/* number of unreceived transmissions */

	/* Place to stash congestion-management values during ACK processing */
	enum rxrpc_congest_mode	mode:8;
	u8			cwnd;
	u8			ssthresh;
	u8			dup_acks;
	u8			cumulative_acks;
};
597
#include <trace/events/rxrpc.h>	/* tracepoints (needs the types above) */

/*
 * Globals provided by the core module.
 */
extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;	/* outstanding skb counters */
extern u32 rxrpc_epoch;					/* local epoch value */
extern atomic_t rxrpc_debug_id;				/* debug ID allocator */
extern struct workqueue_struct *rxrpc_workqueue;	/* rxrpc-private workqueue */

/*
 * Incoming-call preallocation and acceptance.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
void rxrpc_discard_prealloc(struct rxrpc_sock *);
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
					   struct rxrpc_connection *,
					   struct sk_buff *);
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
				     rxrpc_notify_rx_t);
int rxrpc_reject_call(struct rxrpc_sock *);

/*
 * Call event handling.
 */
void __rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
		       enum rxrpc_propose_ack_trace);
void rxrpc_process_call(struct work_struct *);

/*
 * Call object management.
 */
extern const char *const rxrpc_call_states[];
extern const char *const rxrpc_call_completions[];
extern unsigned int rxrpc_max_call_lifetime;
extern struct kmem_cache *rxrpc_call_jar;
extern struct list_head rxrpc_calls;
extern rwlock_t rxrpc_call_lock;

struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
struct rxrpc_call *rxrpc_alloc_call(gfp_t);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
					 struct rxrpc_conn_parameters *,
					 struct sockaddr_rxrpc *,
					 unsigned long, gfp_t);
void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
			 struct sk_buff *);
void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
bool __rxrpc_queue_call(struct rxrpc_call *);
bool rxrpc_queue_call(struct rxrpc_call *);
void rxrpc_see_call(struct rxrpc_call *);
void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_cleanup_call(struct rxrpc_call *);
void __exit rxrpc_destroy_all_calls(void);
657
658static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
659{
660 return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags);
661}
662
663static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
664{
665 return !rxrpc_is_service_call(call);
666}
667
668
669
670
671static inline bool __rxrpc_set_call_completion(struct rxrpc_call *call,
672 enum rxrpc_call_completion compl,
673 u32 abort_code,
674 int error)
675{
676 if (call->state < RXRPC_CALL_COMPLETE) {
677 call->abort_code = abort_code;
678 call->error = error;
679 call->completion = compl,
680 call->state = RXRPC_CALL_COMPLETE;
681 wake_up(&call->waitq);
682 return true;
683 }
684 return false;
685}
686
687static inline bool rxrpc_set_call_completion(struct rxrpc_call *call,
688 enum rxrpc_call_completion compl,
689 u32 abort_code,
690 int error)
691{
692 bool ret;
693
694 write_lock_bh(&call->state_lock);
695 ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
696 write_unlock_bh(&call->state_lock);
697 return ret;
698}
699
700
701
702
/*
 * Record successful completion of a call.  Caller must hold
 * call->state_lock exclusively.  Returns true if this invocation
 * completed the call.
 */
static inline bool __rxrpc_call_completed(struct rxrpc_call *call)
{
	return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
}
707
708static inline bool rxrpc_call_completed(struct rxrpc_call *call)
709{
710 bool ret;
711
712 write_lock_bh(&call->state_lock);
713 ret = __rxrpc_call_completed(call);
714 write_unlock_bh(&call->state_lock);
715 return ret;
716}
717
718
719
720
/*
 * Record that a call was aborted locally, emitting a tracepoint first and
 * then completing the call as RXRPC_CALL_LOCALLY_ABORTED.  Caller must
 * hold call->state_lock exclusively.  Returns true if this invocation
 * completed the call.
 */
static inline bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
				      rxrpc_seq_t seq,
				      u32 abort_code, int error)
{
	trace_rxrpc_abort(why, call->cid, call->call_id, seq,
			  abort_code, error);
	return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
					   abort_code, error);
}
730
731static inline bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
732 rxrpc_seq_t seq, u32 abort_code, int error)
733{
734 bool ret;
735
736 write_lock_bh(&call->state_lock);
737 ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
738 write_unlock_bh(&call->state_lock);
739 return ret;
740}
741
742
743
744
/*
 * Client connection management.
 */
extern unsigned int rxrpc_max_client_connections;	/* client connection limit */
extern unsigned int rxrpc_reap_client_connections;	/* reap threshold (confirm) */
extern unsigned int rxrpc_conn_idle_client_expiry;	/* idle client conn expiry delay */
extern unsigned int rxrpc_conn_idle_client_fast_expiry;	/* fast idle expiry delay */
extern struct idr rxrpc_client_conn_ids;		/* client connection ID allocator */

void rxrpc_destroy_client_conn_ids(void);
int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *,
		       struct sockaddr_rxrpc *, gfp_t);
void rxrpc_expose_client_call(struct rxrpc_call *);
void rxrpc_disconnect_client_call(struct rxrpc_call *);
void rxrpc_put_client_conn(struct rxrpc_connection *);
void __exit rxrpc_destroy_all_client_connections(void);

/*
 * Connection event handling.
 */
void rxrpc_process_connection(struct work_struct *);

/*
 * Connection object management.
 */
extern unsigned int rxrpc_connection_expiry;		/* connection expiry delay */
extern struct list_head rxrpc_connections;		/* master connection list */
extern struct list_head rxrpc_connection_proc_list;	/* connections for /proc */
extern rwlock_t rxrpc_connection_lock;			/* lock for the lists above */

int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
						   struct sk_buff *);
void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
void rxrpc_disconnect_call(struct rxrpc_call *);
void rxrpc_kill_connection(struct rxrpc_connection *);
bool rxrpc_queue_conn(struct rxrpc_connection *);
void rxrpc_see_connection(struct rxrpc_connection *);
void rxrpc_get_connection(struct rxrpc_connection *);
struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *);
void rxrpc_put_service_conn(struct rxrpc_connection *);
void __exit rxrpc_destroy_all_connections(void);
785
786static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
787{
788 return conn->out_clientflag;
789}
790
791static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
792{
793 return !rxrpc_conn_is_client(conn);
794}
795
/*
 * Drop a reference on a connection, dispatching to the client- or
 * service-connection variant as appropriate.  NULL is tolerated.
 */
static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	if (conn) {
		if (rxrpc_conn_is_client(conn))
			rxrpc_put_client_conn(conn);
		else
			rxrpc_put_service_conn(conn);
	}
}
806
807
808
809
/*
 * Service connection management.
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
						     struct sk_buff *);
struct rxrpc_connection *rxrpc_prealloc_service_connection(gfp_t);
void rxrpc_new_incoming_connection(struct rxrpc_connection *, struct sk_buff *);
void rxrpc_unpublish_service_conn(struct rxrpc_connection *);

/*
 * Packet input.
 */
void rxrpc_data_ready(struct sock *);

/*
 * The null ("none") security model.
 */
extern const struct rxrpc_security rxrpc_no_security;

/*
 * Key handling.
 */
extern struct key_type key_type_rxrpc;		/* client key type */
extern struct key_type key_type_rxrpc_s;	/* server key type */

int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t,
			      u32);

/*
 * Local endpoint event handling.
 */
extern void rxrpc_process_local_events(struct rxrpc_local *);

/*
 * Local endpoint object management.
 */
struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *);
void __rxrpc_put_local(struct rxrpc_local *);
void __exit rxrpc_destroy_all_locals(void);
848
/* Take an additional reference on a local endpoint. */
static inline void rxrpc_get_local(struct rxrpc_local *local)
{
	atomic_inc(&local->usage);
}
853
854static inline
855struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
856{
857 return atomic_inc_not_zero(&local->usage) ? local : NULL;
858}
859
860static inline void rxrpc_put_local(struct rxrpc_local *local)
861{
862 if (local && atomic_dec_and_test(&local->usage))
863 __rxrpc_put_local(local);
864}
865
/* Queue the local endpoint's event processor on the rxrpc workqueue. */
static inline void rxrpc_queue_local(struct rxrpc_local *local)
{
	rxrpc_queue_work(&local->processor);
}
870
871
872
873
/*
 * Miscellaneous tunables.
 */
extern unsigned int rxrpc_max_backlog __read_mostly;	/* incoming call backlog limit */
extern unsigned int rxrpc_requested_ack_delay;		/* delay before requested ACK */
extern unsigned int rxrpc_soft_ack_delay;		/* delay before soft ACK */
extern unsigned int rxrpc_idle_ack_delay;		/* delay before idle ACK */
extern unsigned int rxrpc_rx_window_size;		/* receive window size */
extern unsigned int rxrpc_rx_mtu;			/* receive MTU */
extern unsigned int rxrpc_rx_jumbo_max;			/* max jumbo subpackets accepted */
extern unsigned int rxrpc_resend_timeout;		/* retransmission timeout */

extern const s8 rxrpc_ack_priority[];			/* relative priority of ACK reasons */

/*
 * Packet output.
 */
int rxrpc_send_ack_packet(struct rxrpc_call *, bool);
int rxrpc_send_abort_packet(struct rxrpc_call *);
int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
void rxrpc_reject_packets(struct rxrpc_local *);

/*
 * Peer event handling.
 */
void rxrpc_error_report(struct sock *);
void rxrpc_peer_error_distributor(struct work_struct *);
void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
			rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);

/*
 * Peer object management.
 */
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
					 const struct sockaddr_rxrpc *);
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
				     struct sockaddr_rxrpc *, gfp_t);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *,
					      struct rxrpc_peer *);
911
/* Take an additional reference on a peer and return it for chaining. */
static inline struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
{
	atomic_inc(&peer->usage);
	return peer;
}
917
918static inline
919struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
920{
921 return atomic_inc_not_zero(&peer->usage) ? peer : NULL;
922}
923
924extern void __rxrpc_put_peer(struct rxrpc_peer *peer);
925static inline void rxrpc_put_peer(struct rxrpc_peer *peer)
926{
927 if (peer && atomic_dec_and_test(&peer->usage))
928 __rxrpc_put_peer(peer);
929}
930
931
932
933
/*
 * /proc interface.
 */
extern const struct file_operations rxrpc_call_seq_fops;
extern const struct file_operations rxrpc_connection_seq_fops;

/*
 * Message receipt.
 */
void rxrpc_notify_socket(struct rxrpc_call *);
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);

/*
 * The rxkad (Kerberos-based) security model, if configured.
 */
#ifdef CONFIG_RXKAD
extern const struct rxrpc_security rxkad;
#endif

/*
 * Security model setup.
 */
int __init rxrpc_init_security(void);
void rxrpc_exit_security(void);
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
int rxrpc_init_server_conn_security(struct rxrpc_connection *);

/*
 * Message sending.
 */
int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);

/*
 * Socket buffer accounting and lifetime tracking.
 */
void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
void rxrpc_packet_destructor(struct sk_buff *);
void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_lose_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_purge_queue(struct sk_buff_head *);
974
975
976
977
/*
 * Sysctl interface; compiled to no-op stubs when CONFIG_SYSCTL is off.
 */
#ifdef CONFIG_SYSCTL
extern int __init rxrpc_sysctl_init(void);
extern void rxrpc_sysctl_exit(void);
#else
static inline int __init rxrpc_sysctl_init(void) { return 0; }
static inline void rxrpc_sysctl_exit(void) {}
#endif
985
/*
 * Utility functions.
 * NOTE(review): rxrpc_extract_addr_from_skb() is also declared above in
 * the connection-object section - one of the two declarations is redundant.
 */
int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);
990
991static inline bool before(u32 seq1, u32 seq2)
992{
993 return (s32)(seq1 - seq2) < 0;
994}
995static inline bool before_eq(u32 seq1, u32 seq2)
996{
997 return (s32)(seq1 - seq2) <= 0;
998}
999static inline bool after(u32 seq1, u32 seq2)
1000{
1001 return (s32)(seq1 - seq2) > 0;
1002}
1003static inline bool after_eq(u32 seq1, u32 seq2)
1004{
1005 return (s32)(seq1 - seq2) >= 0;
1006}
1007
1008
1009
1010
/*
 * Debug tracing: unconditional printk-based helpers.  Which of these the
 * _enter/_leave/... wrappers below actually emit is controlled by
 * rxrpc_debug / the build configuration.
 */
extern unsigned int rxrpc_debug;	/* runtime debug category mask */

#define dbgprintk(FMT,...) \
	printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)

#define kenter(FMT,...)	dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define kleave(FMT,...)	dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define kdebug(FMT,...)	dbgprintk("    "FMT ,##__VA_ARGS__)
#define kproto(FMT,...)	dbgprintk("### "FMT ,##__VA_ARGS__)
#define knet(FMT,...)	dbgprintk("@@@ "FMT ,##__VA_ARGS__)
1021
1022
/*
 * Debug wrappers, three flavours:
 *  - __KDEBUG: always emit;
 *  - CONFIG_AF_RXRPC_DEBUG: emit per-category, gated on rxrpc_debug bits;
 *  - otherwise: no_printk() stubs (format-checked but compiled out).
 */
#if defined(__KDEBUG)
#define _enter(FMT,...)	kenter(FMT,##__VA_ARGS__)
#define _leave(FMT,...)	kleave(FMT,##__VA_ARGS__)
#define _debug(FMT,...)	kdebug(FMT,##__VA_ARGS__)
#define _proto(FMT,...)	kproto(FMT,##__VA_ARGS__)
#define _net(FMT,...)	knet(FMT,##__VA_ARGS__)

#elif defined(CONFIG_AF_RXRPC_DEBUG)
/* Category bits in rxrpc_debug */
#define RXRPC_DEBUG_KENTER	0x01
#define RXRPC_DEBUG_KLEAVE	0x02
#define RXRPC_DEBUG_KDEBUG	0x04
#define RXRPC_DEBUG_KPROTO	0x08
#define RXRPC_DEBUG_KNET	0x10

#define _enter(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER))	\
		kenter(FMT,##__VA_ARGS__);		\
} while (0)

#define _leave(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE))	\
		kleave(FMT,##__VA_ARGS__);		\
} while (0)

#define _debug(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG))	\
		kdebug(FMT,##__VA_ARGS__);		\
} while (0)

#define _proto(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO))	\
		kproto(FMT,##__VA_ARGS__);		\
} while (0)

#define _net(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET))	\
		knet(FMT,##__VA_ARGS__);		\
} while (0)

#else
#define _enter(FMT,...)	no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define _leave(FMT,...)	no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define _debug(FMT,...)	no_printk("    "FMT ,##__VA_ARGS__)
#define _proto(FMT,...)	no_printk("### "FMT ,##__VA_ARGS__)
#define _net(FMT,...)	no_printk("@@@ "FMT ,##__VA_ARGS__)
#endif
1074
1075
1076
1077
/*
 * Debug assertions.  Change "#if 1" to "#if 0" to compile them out to
 * empty statements.  The CMP variants print both operands before BUG().
 */
#if 1

#define ASSERT(X)						\
do {								\
	if (unlikely(!(X))) {					\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely(!(_x OP _y))) {					\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)						\
do {								\
	if (unlikely((C) && !(X))) {				\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely((C) && !(_x OP _y))) {				\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)				\
do {						\
} while (0)

#define ASSERTCMP(X, OP, Y)			\
do {						\
} while (0)

#define ASSERTIF(C, X)				\
do {						\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)		\
do {						\
} while (0)

#endif
1139