1
2
3
4
5
6
7
8
9
10
11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include <linux/module.h>
15#include <linux/net.h>
16#include <linux/skbuff.h>
17#include <linux/slab.h>
18#include <linux/udp.h>
19#include <linux/ip.h>
20#include <linux/hashtable.h>
21#include <net/sock.h>
22#include <net/af_rxrpc.h>
23#include "ar-internal.h"
24
25static void rxrpc_local_processor(struct work_struct *);
26static void rxrpc_local_rcu(struct rcu_head *);
27
28
29
30
31
32
33
34
35
36
37static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
38 const struct sockaddr_rxrpc *srx)
39{
40 long diff;
41
42 diff = ((local->srx.transport_type - srx->transport_type) ?:
43 (local->srx.transport_len - srx->transport_len) ?:
44 (local->srx.transport.family - srx->transport.family));
45 if (diff != 0)
46 return diff;
47
48 switch (srx->transport.family) {
49 case AF_INET:
50
51
52
53 return ((u16 __force)local->srx.transport.sin.sin_port -
54 (u16 __force)srx->transport.sin.sin_port) ?:
55 memcmp(&local->srx.transport.sin.sin_addr,
56 &srx->transport.sin.sin_addr,
57 sizeof(struct in_addr));
58#ifdef CONFIG_AF_RXRPC_IPV6
59 case AF_INET6:
60
61
62
63 return ((u16 __force)local->srx.transport.sin6.sin6_port -
64 (u16 __force)srx->transport.sin6.sin6_port) ?:
65 memcmp(&local->srx.transport.sin6.sin6_addr,
66 &srx->transport.sin6.sin6_addr,
67 sizeof(struct in6_addr));
68#endif
69 default:
70 BUG();
71 }
72}
73
74
75
76
/*
 * Allocate and initialise a new local endpoint for the given address.
 * Returns NULL on allocation failure.  On success the caller owns the
 * single initial usage reference.
 */
static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
					     const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;

	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
	if (local) {
		atomic_set(&local->usage, 1);
		local->rxnet = rxnet;
		INIT_LIST_HEAD(&local->link);
		INIT_WORK(&local->processor, rxrpc_local_processor);
		init_rwsem(&local->defrag_sem);
		skb_queue_head_init(&local->reject_queue);
		skb_queue_head_init(&local->event_queue);
		local->client_conns = RB_ROOT;
		spin_lock_init(&local->client_conns_lock);
		spin_lock_init(&local->lock);
		rwlock_init(&local->services_lock);
		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&local->srx, srx, sizeof(*srx));
		/* The service ID is not part of the endpoint identity;
		 * clear it so lookups compare addresses only. */
		local->srx.srx_service = 0;
		trace_rxrpc_local(local, rxrpc_local_new, 1, NULL);
	}

	_leave(" = %p", local);
	return local;
}
104
105
106
107
108
/*
 * Create and bind the kernel UDP socket backing a local endpoint and
 * configure it for error reporting and path-MTU discovery.
 * Returns 0 on success or a negative error code; on failure the socket
 * is released and local->socket is left NULL.
 */
static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
{
	struct sock *sock;
	int ret, opt;

	_enter("%p{%d,%d}",
	       local, local->srx.transport_type, local->srx.transport.family);

	/* Create a socket to represent the local endpoint. */
	ret = sock_create_kern(net, local->srx.transport.family,
			       local->srx.transport_type, 0, &local->socket);
	if (ret < 0) {
		_leave(" = %d [socket]", ret);
		return ret;
	}

	/* Bind the local address, unless the caller supplied only a family
	 * (transport_len no larger than sa_family_t), in which case the
	 * socket is left unbound. */
	if (local->srx.transport_len > sizeof(sa_family_t)) {
		_debug("bind");
		ret = kernel_bind(local->socket,
				  (struct sockaddr *)&local->srx.transport,
				  local->srx.transport_len);
		if (ret < 0) {
			_debug("bind failed %d", ret);
			goto error;
		}
	}

	switch (local->srx.transport.family) {
	case AF_INET:
		/* Ask for ICMP errors to be delivered on the error queue. */
		opt = 1;
		ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}

		/* Request path-MTU discovery (set the DF bit). */
		opt = IP_PMTUDISC_DO;
		ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}
		break;

	case AF_INET6:
		/* Ask for ICMPv6 errors to be delivered on the error queue. */
		opt = 1;
		ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}

		/* Request path-MTU discovery. */
		opt = IPV6_PMTUDISC_DO;
		ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}
		break;

	default:
		BUG();
	}

	/* Hook the socket's data-ready and error-report callbacks so
	 * incoming packets and errors are routed to rxrpc. */
	sock = local->socket->sk;
	sock->sk_user_data = local;
	sock->sk_data_ready = rxrpc_data_ready;
	sock->sk_error_report = rxrpc_error_report;
	_leave(" = 0");
	return 0;

error:
	kernel_sock_shutdown(local->socket, SHUT_RDWR);
	local->socket->sk->sk_user_data = NULL;
	sock_release(local->socket);
	local->socket = NULL;

	_leave(" = %d", ret);
	return ret;
}
199
200
201
202
/*
 * Look up or create a local endpoint for the given address.  The list of
 * endpoints is kept sorted by rxrpc_local_cmp_key() order under
 * rxnet->local_mutex.  Returns a referenced endpoint, or an ERR_PTR:
 * -ENOMEM, a socket-setup error, or -EADDRINUSE if a service-bound
 * lookup collides with an existing endpoint.
 */
struct rxrpc_local *rxrpc_lookup_local(struct net *net,
				       const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;
	struct rxrpc_net *rxnet = rxrpc_net(net);
	struct list_head *cursor;
	const char *age;
	long diff;
	int ret;

	_enter("{%d,%d,%pISp}",
	       srx->transport_type, srx->transport.family, &srx->transport);

	mutex_lock(&rxnet->local_mutex);

	/* Walk the sorted list looking for a matching address; stop early
	 * once we pass the point where a match could be. */
	for (cursor = rxnet->local_endpoints.next;
	     cursor != &rxnet->local_endpoints;
	     cursor = cursor->next) {
		local = list_entry(cursor, struct rxrpc_local, link);

		diff = rxrpc_local_cmp_key(local, srx);
		if (diff < 0)
			continue;
		if (diff > 0)
			break;

		/* The address matched.  A non-zero service ID means the
		 * caller wants to bind a service to this address, which
		 * conflicts with the existing endpoint. */
		if (srx->srx_service) {
			local = NULL;
			goto addr_in_use;
		}

		/* Try to take a ref; if the endpoint's refcount has already
		 * hit zero it is being torn down, so unlink it and fall
		 * through to create a replacement in its place.  cursor is
		 * advanced first so it stays valid after the del. */
		if (!rxrpc_get_local_maybe(local)) {
			cursor = cursor->next;
			list_del_init(&local->link);
			break;
		}

		age = "old";
		goto found;
	}

	local = rxrpc_alloc_local(rxnet, srx);
	if (!local)
		goto nomem;

	ret = rxrpc_open_socket(local, net);
	if (ret < 0)
		goto sock_error;

	/* Insert before cursor to preserve the sort order. */
	list_add_tail(&local->link, cursor);
	age = "new";

found:
	mutex_unlock(&rxnet->local_mutex);

	_net("LOCAL %s %d {%pISp}",
	     age, local->debug_id, &local->srx.transport);

	_leave(" = %p", local);
	return local;

nomem:
	ret = -ENOMEM;
sock_error:
	mutex_unlock(&rxnet->local_mutex);
	/* The endpoint was never published, so a plain kfree suffices. */
	kfree(local);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

addr_in_use:
	mutex_unlock(&rxnet->local_mutex);
	_leave(" = -EADDRINUSE");
	return ERR_PTR(-EADDRINUSE);
}
286
287
288
289
290struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
291{
292 const void *here = __builtin_return_address(0);
293 int n;
294
295 n = atomic_inc_return(&local->usage);
296 trace_rxrpc_local(local, rxrpc_local_got, n, here);
297 return local;
298}
299
300
301
302
/*
 * Try to take a reference on a local endpoint, failing if its usage count
 * has already reached zero (ie. it is being destroyed).  Returns the
 * endpoint on success or NULL if no ref could be taken (or if local was
 * NULL to begin with).
 */
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);

	if (local) {
		/* __atomic_add_unless() returns the old count; it refuses
		 * to increment when the count is 0, so n > 0 means we got
		 * a ref. */
		int n = __atomic_add_unless(&local->usage, 1, 0);
		if (n > 0)
			trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
		else
			local = NULL;
	}
	return local;
}
316
317
318
319
/*
 * Queue a local endpoint's work item for background processing.
 */
void rxrpc_queue_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);

	/* NOTE(review): local->usage is read after the work item has been
	 * queued; if the processor can run and free the endpoint before the
	 * read, this could be a read-after-free — confirm against the
	 * refcount/workqueue lifetime rules for this object. */
	if (rxrpc_queue_work(&local->processor))
		trace_rxrpc_local(local, rxrpc_local_queued,
				  atomic_read(&local->usage), here);
}
328
329
330
331
/*
 * Handle the usage count reaching zero: kick the processor work item,
 * which will notice the zero count and perform the actual destruction.
 */
static void __rxrpc_put_local(struct rxrpc_local *local)
{
	_enter("%d", local->debug_id);
	rxrpc_queue_work(&local->processor);
}
337
338
339
340
/*
 * Drop a reference on a local endpoint.  When the count hits zero,
 * destruction is deferred to the processor work item.  NULL is tolerated.
 */
void rxrpc_put_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	int n;

	if (local) {
		n = atomic_dec_return(&local->usage);
		trace_rxrpc_local(local, rxrpc_local_put, n, here);

		if (n == 0)
			__rxrpc_put_local(local);
	}
}
354
355
356
357
358
359
360
361
/*
 * Destroy a local endpoint whose usage count has reached zero: unpublish
 * it, close its socket, purge its queues and hand the memory to RCU for
 * final freeing.  Runs from the processor work item.
 */
static void rxrpc_local_destroyer(struct rxrpc_local *local)
{
	struct socket *socket = local->socket;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("%d", local->debug_id);

	/* The work item may be requeued and find the endpoint already
	 * marked dead; in that case there is nothing left to do.
	 * NOTE(review): ->dead is tested and set without a lock here —
	 * presumably only this work item ever reaches this path; confirm. */
	if (local->dead) {
		_leave(" [already dead]");
		return;
	}
	local->dead = true;

	/* Unpublish so rxrpc_lookup_local() can no longer find it. */
	mutex_lock(&rxnet->local_mutex);
	list_del_init(&local->link);
	mutex_unlock(&rxnet->local_mutex);

	/* Nothing should still be attached at this point. */
	ASSERT(RB_EMPTY_ROOT(&local->client_conns));
	ASSERT(!local->service);

	if (socket) {
		local->socket = NULL;
		kernel_sock_shutdown(socket, SHUT_RDWR);
		/* Detach before release so callbacks can't dereference us. */
		socket->sk->sk_user_data = NULL;
		sock_release(socket);
	}

	/* At this point, there should be no more packets coming in to the
	 * local endpoint, so discard anything still pending. */
	rxrpc_purge_queue(&local->reject_queue);
	rxrpc_purge_queue(&local->event_queue);

	_debug("rcu local %d", local->debug_id);
	/* Defer the free past any RCU read-side lookups still in flight. */
	call_rcu(&local->rcu, rxrpc_local_rcu);
}
402
403
404
405
/*
 * Background processor for a local endpoint: drains the reject and event
 * packet queues, and destroys the endpoint once its usage count drops to
 * zero.  Loops until a pass completes with no work found.
 */
static void rxrpc_local_processor(struct work_struct *work)
{
	struct rxrpc_local *local =
		container_of(work, struct rxrpc_local, processor);
	bool again;

	trace_rxrpc_local(local, rxrpc_local_processing,
			  atomic_read(&local->usage), NULL);

	do {
		again = false;
		/* A zero usage count means the last ref was dropped; tear
		 * the endpoint down and stop — local is invalid after. */
		if (atomic_read(&local->usage) == 0)
			return rxrpc_local_destroyer(local);

		if (!skb_queue_empty(&local->reject_queue)) {
			rxrpc_reject_packets(local);
			again = true;
		}

		if (!skb_queue_empty(&local->event_queue)) {
			rxrpc_process_local_events(local);
			again = true;
		}
	} while (again);
}
431
432
433
434
/*
 * RCU callback performing the final free of a local endpoint, after any
 * read-side critical sections that might still see it have completed.
 */
static void rxrpc_local_rcu(struct rcu_head *rcu)
{
	struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu);

	_enter("%d", local->debug_id);

	/* The processor must not be queued or running by now. */
	ASSERT(!work_pending(&local->processor));

	_net("DESTROY LOCAL %d", local->debug_id);
	kfree(local);
	_leave("");
}
447
448
449
450
/*
 * Verify that no local endpoints remain as a network namespace is torn
 * down.  Any endpoint still on the list is a leaked reference: report
 * each one and BUG().
 */
void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet)
{
	struct rxrpc_local *local;

	_enter("");

	/* Let any pending processor work (including destroyers) finish. */
	flush_workqueue(rxrpc_workqueue);

	if (!list_empty(&rxnet->local_endpoints)) {
		mutex_lock(&rxnet->local_mutex);
		list_for_each_entry(local, &rxnet->local_endpoints, link) {
			pr_err("AF_RXRPC: Leaked local %p {%d}\n",
			       local, atomic_read(&local->usage));
		}
		mutex_unlock(&rxnet->local_mutex);
		BUG();
	}
}
469