#define pr_fmt(fmt) "X25: " fmt

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/x25.h>

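/*
 * Queue a received Data packet for the user.  While the M (more data) bit is
 * set, the fragments are collected on the per-socket fragment_queue; when the
 * final fragment arrives the whole sequence is copied into one freshly
 * allocated skb and appended to the socket receive queue.
 */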
static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
{
	struct sk_buff *skbo, *skbn = skb;
	struct x25_sock *x25 = x25_sk(sk);

	if (more) {
		x25->fraglen += skb->len;
		skb_queue_tail(&x25->fragment_queue, skb);
		skb_set_owner_r(skb, sk);
		return 0;
	}

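	/* Final fragment of an M-bit sequence: reassemble the queued pieces. */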
	if (!more && x25->fraglen > 0) {
		int len = x25->fraglen + skb->len;

		if ((skbn = alloc_skb(len, GFP_ATOMIC)) == NULL) {
			kfree_skb(skb);
			return 1;
		}

		skb_queue_tail(&x25->fragment_queue, skb);

		skb_reset_transport_header(skbn);

		skbo = skb_dequeue(&x25->fragment_queue);
		skb_copy_from_linear_data(skbo, skb_put(skbn, skbo->len),
					  skbo->len);
		kfree_skb(skbo);

		while ((skbo =
			skb_dequeue(&x25->fragment_queue)) != NULL) {
			skb_pull(skbo, (x25->neighbour->extended) ?
					X25_EXT_MIN_LEN : X25_STD_MIN_LEN);
			skb_copy_from_linear_data(skbo,
						  skb_put(skbn, skbo->len),
						  skbo->len);
			kfree_skb(skbo);
		}

		x25->fraglen = 0;
	}

	skb_set_owner_r(skbn, sk);
	skb_queue_tail(&sk->sk_receive_queue, skbn);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	return 0;
}

/*
 * State machine for state 1, Awaiting Call Accepted State.
 * The handling of the timer(s) is in file x25_timer.c.
 * Handling of state 0 and connection release is in af_x25.c.
 */
static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
	struct x25_address source_addr, dest_addr;
	int len;
	struct x25_sock *x25 = x25_sk(sk);

	switch (frametype) {
	case X25_CALL_ACCEPTED: {

		x25_stop_timer(sk);
		x25->condition = 0x00;
		x25->vs = 0;
		x25->va = 0;
		x25->vr = 0;
		x25->vl = 0;
		x25->state = X25_STATE_3;
		sk->sk_state = TCP_ESTABLISHED;
		/*
		 * Parse the data in the frame.
		 */
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
			goto out_clear;
		skb_pull(skb, X25_STD_MIN_LEN);

		len = x25_parse_address_block(skb, &source_addr,
					      &dest_addr);
		if (len > 0)
			skb_pull(skb, len);
		else if (len < 0)
			goto out_clear;

		len = x25_parse_facilities(skb, &x25->facilities,
					   &x25->dte_facilities,
					   &x25->vc_facil_mask);
		if (len > 0)
			skb_pull(skb, len);
		else if (len < 0)
			goto out_clear;
		/*
		 * Copy any Call User Data.
		 */
		if (skb->len > 0) {
			if (skb->len > X25_MAX_CUD_LEN)
				goto out_clear;

			skb_copy_bits(skb, 0, x25->calluserdata.cuddata,
				      skb->len);
			x25->calluserdata.cudlength = skb->len;
		}
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
		break;
	}
	case X25_CLEAR_REQUEST:
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
			goto out_clear;

		x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
		x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
		break;

	default:
		break;
	}

	return 0;

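/*
 * The packet could not be parsed sanely: clear the call and wait for the
 * peer's Clear Confirmation, guarded by the T23 timer.
 */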
out_clear:
	x25_write_internal(sk, X25_CLEAR_REQUEST);
	x25->state = X25_STATE_2;
	x25_start_t23timer(sk);
	return 0;
}

/*
 * State machine for state 2, Awaiting Clear Confirmation State.
 * The handling of the timer(s) is in file x25_timer.c.
 * Handling of state 0 and connection release is in af_x25.c.
 */
static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
	switch (frametype) {

	case X25_CLEAR_REQUEST:
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
			goto out_clear;

		x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
		x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
		break;

	case X25_CLEAR_CONFIRMATION:
		x25_disconnect(sk, 0, 0, 0);
		break;

	default:
		break;
	}

	return 0;

out_clear:
	x25_write_internal(sk, X25_CLEAR_REQUEST);
	x25_start_t23timer(sk);
	return 0;
}

/*
 * State machine for state 3, Connected State.
 * The handling of the timer(s) is in file x25_timer.c.
 * Handling of state 0 and connection release is in af_x25.c.
 */
static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
{
	int queued = 0;
	int modulus;
	struct x25_sock *x25 = x25_sk(sk);

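	/*
	 * Sequence numbers run modulo 8, or modulo 128 when the neighbour
	 * uses extended sequence numbering.
	 */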
	modulus = (x25->neighbour->extended) ? X25_EMODULUS : X25_SMODULUS;

	switch (frametype) {

	case X25_RESET_REQUEST:
		x25_write_internal(sk, X25_RESET_CONFIRMATION);
		x25_stop_timer(sk);
		x25->condition = 0x00;
		x25->vs = 0;
		x25->vr = 0;
		x25->va = 0;
		x25->vl = 0;
		x25_requeue_frames(sk);
		break;

	case X25_CLEAR_REQUEST:
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
			goto out_clear;

		x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
		x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
		break;

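	/*
	 * RR and RNR acknowledge our outstanding Data packets up to N(R); an
	 * invalid N(R) forces a Reset.  RNR additionally marks the peer as
	 * receiver-busy, RR clears that condition.
	 */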
	case X25_RR:
	case X25_RNR:
		if (!x25_validate_nr(sk, nr)) {
			x25_clear_queues(sk);
			x25_write_internal(sk, X25_RESET_REQUEST);
			x25_start_t22timer(sk);
			x25->condition = 0x00;
			x25->vs = 0;
			x25->vr = 0;
			x25->va = 0;
			x25->vl = 0;
			x25->state = X25_STATE_4;
		} else {
			x25_frames_acked(sk, nr);
			if (frametype == X25_RNR) {
				x25->condition |= X25_COND_PEER_RX_BUSY;
			} else {
				x25->condition &= ~X25_COND_PEER_RX_BUSY;
			}
		}
		break;

	case X25_DATA:
		x25->condition &= ~X25_COND_PEER_RX_BUSY;
		if ((ns != x25->vr) || !x25_validate_nr(sk, nr)) {
			x25_clear_queues(sk);
			x25_write_internal(sk, X25_RESET_REQUEST);
			x25_start_t22timer(sk);
			x25->condition = 0x00;
			x25->vs = 0;
			x25->vr = 0;
			x25->va = 0;
			x25->vl = 0;
			x25->state = X25_STATE_4;
			break;
		}
		x25_frames_acked(sk, nr);
		if (ns == x25->vr) {
			if (x25_queue_rx_frame(sk, skb, m) == 0) {
				x25->vr = (x25->vr + 1) % modulus;
				queued = 1;
			} else {
				/* Reassembly failed: reset the circuit. */
				x25_clear_queues(sk);
				x25_write_internal(sk, X25_RESET_REQUEST);
				x25_start_t22timer(sk);
				x25->condition = 0x00;
				x25->vs = 0;
				x25->vr = 0;
				x25->va = 0;
				x25->vl = 0;
				x25->state = X25_STATE_4;
				break;
			}
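			/*
			 * Mark ourselves receiver-busy once the socket
			 * receive buffer is more than half full.
			 */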
			if (atomic_read(&sk->sk_rmem_alloc) >
			    (sk->sk_rcvbuf >> 1))
				x25->condition |= X25_COND_OWN_RX_BUSY;
		}
		/*
		 * If the window is full Ack it immediately, else
		 * start the holdback timer.
		 */
		if (((x25->vl + x25->facilities.winsize_in) % modulus) == x25->vr) {
			x25->condition &= ~X25_COND_ACK_PENDING;
			x25_stop_timer(sk);
			x25_enquiry_response(sk);
		} else {
			x25->condition |= X25_COND_ACK_PENDING;
			x25_start_t2timer(sk);
		}
		break;

	case X25_INTERRUPT_CONFIRMATION:
		clear_bit(X25_INTERRUPT_FLAG, &x25->flags);
		break;

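	/*
	 * Interrupt packets carry expedited data: deliver it inline as urgent
	 * data when SO_OOBINLINE is set, otherwise park the packet on the
	 * interrupt queue, then raise SIGURG and confirm to the peer.
	 */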
	case X25_INTERRUPT:
		if (sock_flag(sk, SOCK_URGINLINE))
			queued = !sock_queue_rcv_skb(sk, skb);
		else {
			skb_set_owner_r(skb, sk);
			skb_queue_tail(&x25->interrupt_in_queue, skb);
			queued = 1;
		}
		sk_send_sigurg(sk);
		x25_write_internal(sk, X25_INTERRUPT_CONFIRMATION);
		break;

	default:
		pr_warn("unknown %02X in state 3\n", frametype);
		break;
	}

	return queued;

out_clear:
	x25_write_internal(sk, X25_CLEAR_REQUEST);
	x25->state = X25_STATE_2;
	x25_start_t23timer(sk);
	return 0;
}

/*
 * State machine for state 4, Awaiting Reset Confirmation State.
 * The handling of the timer(s) is in file x25_timer.c.
 * Handling of state 0 and connection release is in af_x25.c.
 */
static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
	struct x25_sock *x25 = x25_sk(sk);

	switch (frametype) {

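	/*
	 * A Reset Request from the peer, or the confirmation of our own
	 * Reset Request, returns the circuit to the Connected state with all
	 * sequence variables zeroed; unacknowledged Data is requeued.
	 */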
	case X25_RESET_REQUEST:
		x25_write_internal(sk, X25_RESET_CONFIRMATION);
		fallthrough;
	case X25_RESET_CONFIRMATION: {
		x25_stop_timer(sk);
		x25->condition = 0x00;
		x25->va = 0;
		x25->vr = 0;
		x25->vs = 0;
		x25->vl = 0;
		x25->state = X25_STATE_3;
		x25_requeue_frames(sk);
		break;
	}
	case X25_CLEAR_REQUEST:
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
			goto out_clear;

		x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
		x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
		break;

	default:
		break;
	}

	return 0;

out_clear:
	x25_write_internal(sk, X25_CLEAR_REQUEST);
	x25->state = X25_STATE_2;
	x25_start_t23timer(sk);
	return 0;
}

/* Higher level upcall for a LAPB frame */
int x25_process_rx_frame(struct sock *sk, struct sk_buff *skb)
{
	struct x25_sock *x25 = x25_sk(sk);
	int queued = 0, frametype, ns, nr, q, d, m;

	if (x25->state == X25_STATE_0)
		return 0;

	frametype = x25_decode(sk, skb, &ns, &nr, &q, &d, &m);

	switch (x25->state) {
	case X25_STATE_1:
		queued = x25_state1_machine(sk, skb, frametype);
		break;
	case X25_STATE_2:
		queued = x25_state2_machine(sk, skb, frametype);
		break;
	case X25_STATE_3:
		queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
		break;
	case X25_STATE_4:
		queued = x25_state4_machine(sk, skb, frametype);
		break;
	}

	x25_kick(sk);

	return queued;
}

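/*
 * Backlog receive handler: the skb is freed here unless one of the state
 * machines took ownership of it by queueing it.
 */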
int x25_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int queued = x25_process_rx_frame(sk, skb);

	if (!queued)
		kfree_skb(skb);

	return 0;
}