/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/include/linux/sunrpc/svc.h
 *
 * RPC server declarations.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
#ifndef SUNRPC_SVC_H
#define SUNRPC_SVC_H

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/wait.h>
#include <linux/mm.h>
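
/*
 * Statistics kept for each svc_pool; a server can expose these counters
 * to user space via svc_pool_stats_open() (declared later in this file).
 */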
struct svc_pool_stats {
	atomic_long_t	packets;
	unsigned long	sockets_queued;
	atomic_long_t	threads_woken;
	atomic_long_t	threads_timedout;
};

/*
 * RPC service thread pool.
 *
 * Pool of threads and temporary sockets.  Generally there is only
 * a single one of these per RPC service, but on NUMA machines those
 * services that can benefit from it (i.e. nfs but not lockd) will
 * have one pool per NUMA node.  This optimization reduces cross-
 * node traffic on multi-node NUMA NFS servers.
 */
struct svc_pool {
	unsigned int		sp_id;		/* pool id; also node id on NUMA */
	spinlock_t		sp_lock;	/* protects all fields */
	struct list_head	sp_sockets;	/* pending sockets */
	unsigned int		sp_nrthreads;	/* # of threads in pool */
	struct list_head	sp_all_threads;	/* all server threads */
	struct svc_pool_stats	sp_stats;	/* statistics on pool operation */
#define	SP_TASK_PENDING		(0)		/* still work to do even if no
						 * xprt is queued */
#define SP_CONGESTED		(1)
	unsigned long		sp_flags;
} ____cacheline_aligned_in_smp;

struct svc_serv;

struct svc_serv_ops {
	/* Callback to use when last thread exits. */
	void		(*svo_shutdown)(struct svc_serv *, struct net *);

	/* function for service threads to run */
	int		(*svo_function)(void *);

	/* queue up a transport for servicing */
	void		(*svo_enqueue_xprt)(struct svc_xprt *);

	/* set up thread (or whatever) execution context */
	int		(*svo_setup)(struct svc_serv *, struct svc_pool *, int);

	/* optional module to count when adding threads */
	struct module	*svo_module;
};
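
/*
 * Illustrative sketch (not part of the original header): a service such
 * as nfsd typically fills in a static ops table and hands it to
 * svc_create()/svc_create_pooled().  The "my_*" names below are
 * hypothetical placeholders; svc_xprt_do_enqueue() and
 * svc_set_num_threads() are the stock sunrpc helpers.
 *
 *	static struct svc_serv_ops my_sv_ops = {
 *		.svo_shutdown		= my_last_thread,
 *		.svo_function		= my_thread_fn,
 *		.svo_enqueue_xprt	= svc_xprt_do_enqueue,
 *		.svo_setup		= svc_set_num_threads,
 *		.svo_module		= THIS_MODULE,
 *	};
 */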

/*
 * RPC service.
 *
 * An RPC service is a ``daemon,'' possibly multithreaded, which
 * receives and processes incoming RPC messages.
 * It has one or more transport sockets associated with it, and maintains
 * a list of idle threads waiting for input.
 *
 * We currently do not support more than one RPC program per daemon.
 */
struct svc_serv {
	struct svc_program *	sv_program;	/* RPC program */
	struct svc_stat *	sv_stats;	/* RPC statistics */
	spinlock_t		sv_lock;
	unsigned int		sv_nrthreads;	/* # of server threads */
	unsigned int		sv_maxconn;	/* max connections allowed or
						 * '0' causing max to be based
						 * on number of threads. */

	unsigned int		sv_max_payload;	/* datagram payload size */
	unsigned int		sv_max_mesg;	/* max_payload + 1 page for overheads */
	unsigned int		sv_xdrsize;	/* XDR buffer size */
	struct list_head	sv_permsocks;	/* all permanent sockets */
	struct list_head	sv_tempsocks;	/* all temporary sockets */
	int			sv_tmpcnt;	/* count of temporary sockets */
	struct timer_list	sv_temptimer;	/* timer for aging temporary sockets */

	char *			sv_name;	/* service name */

	unsigned int		sv_nrpools;	/* number of thread pools */
	struct svc_pool *	sv_pools;	/* array of thread pools */
	struct svc_serv_ops	*sv_ops;	/* server operations */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	struct list_head	sv_cb_list;	/* queue for callback requests
						 * that arrive over the same
						 * connection */
	spinlock_t		sv_cb_lock;	/* protects the svc_cb_list */
	wait_queue_head_t	sv_cb_waitq;	/* sleep here if there are no
						 * entries in the svc_cb_list */
	bool			sv_bc_enabled;	/* service uses backchannel */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};

/*
 * We use sv_nrthreads as a reference count.  svc_destroy() drops
 * this refcount, so we need to bump it up around operations that
 * change the number of threads.  Horrible, but there it is.
 * Should be called with the "service mutex" held.
 */
static inline void svc_get(struct svc_serv *serv)
{
	serv->sv_nrthreads++;
}

/*
 * Maximum payload size supported by a kernel RPC server.
 * This is used to determine the max number of pages nfsd is
 * willing to return in a single READ operation.
 *
 * These happen to all be powers of 2, which is not strictly
 * necessary but helps enforce the real limitation, which is
 * that they should be multiples of PAGE_SIZE.
 *
 * For UDP transports, a block plus NFS, RPC, and UDP headers
 * have to fit into the IP datagram limit of 64K.  The largest
 * feasible number for all known page sizes is probably 48K,
 * but we choose 32K here.
 *
 * For TCP transports we have more freedom.  A size of 1MB is
 * chosen to match the client limit.  Other OSes are known to
 * have larger limits, but those numbers are probably beyond
 * the point of diminishing returns.
 */
#define RPCSVC_MAXPAYLOAD	(1*1024*1024u)
#define RPCSVC_MAXPAYLOAD_TCP	RPCSVC_MAXPAYLOAD
#define RPCSVC_MAXPAYLOAD_UDP	(32*1024u)

extern u32 svc_max_payload(const struct svc_rqst *rqstp);

/*
 * RPC requests and replies are stored in one or more pages.
 * We maintain an array of pages for each server thread.
 * Requests are copied into these pages as they arrive.  Remaining
 * pages are available to write the reply into.
 *
 * Pages are sent using ->sendpage so each server thread needs to
 * allocate more to replace those used in sending.  To help keep track
 * of these pages we have a receive list where all pages initially live,
 * and a send list where pages are moved to when they are to be part
 * of a reply.
 *
 * We use xdr_buf for holding responses as it fits well with NFS
 * read responses (that have a header, and some data pages, and possibly
 * a tail) and means we can share some client side routines.
 *
 * The xdr_buf.head kvec always points to the first page in the rq_*pages
 * list.  The xdr_buf.pages pointer points to the second page on that
 * list.  xdr_buf.tail points to the end of the first page.
 * This assumes that the non-page part of an rpc reply will fit
 * in a page - NFSd ensures this.  lockd also has no trouble.
 *
 * Each request/reply pair can have at most one "payload", plus two pages,
 * one for the request, and one for the reply.
 * When using ->sendfile to return read data, we might need one extra page
 * if the request is not page-aligned.  So add another '1'.
 */
#define RPCSVC_MAXPAGES		((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE \
				+ 2 + 1)
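
/*
 * Worked example (added for clarity, not in the original header): with
 * PAGE_SIZE == 4096 and RPCSVC_MAXPAYLOAD == 1MB, this evaluates to
 * (1048576 + 4095)/4096 + 2 + 1 = 256 + 3 = 259 pages per server thread.
 */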

static inline u32 svc_getnl(struct kvec *iov)
{
	__be32 val, *vp;
	vp = iov->iov_base;
	val = *vp++;
	iov->iov_base = (void*)vp;
	iov->iov_len -= sizeof(__be32);
	return ntohl(val);
}

static inline void svc_putnl(struct kvec *iov, u32 val)
{
	__be32 *vp = iov->iov_base + iov->iov_len;
	*vp = htonl(val);
	iov->iov_len += sizeof(__be32);
}

static inline __be32 svc_getu32(struct kvec *iov)
{
	__be32 val, *vp;
	vp = iov->iov_base;
	val = *vp++;
	iov->iov_base = (void*)vp;
	iov->iov_len -= sizeof(__be32);
	return val;
}

static inline void svc_ungetu32(struct kvec *iov)
{
	__be32 *vp = (__be32 *)iov->iov_base;
	iov->iov_base = (void *)(vp - 1);
	iov->iov_len += sizeof(*vp);
}

static inline void svc_putu32(struct kvec *iov, __be32 val)
{
	__be32 *vp = iov->iov_base + iov->iov_len;
	*vp = val;
	iov->iov_len += sizeof(__be32);
}
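
/*
 * Usage sketch (illustrative only; "argv"/"resv" are local names, not part
 * of this API): dispatch and XDR code pulls 32-bit words off the head of
 * rq_arg and appends words to the head of rq_res:
 *
 *	struct kvec *argv = &rqstp->rq_arg.head[0];
 *	struct kvec *resv = &rqstp->rq_res.head[0];
 *	u32 vers = svc_getnl(argv);	// read, convert to host order
 *	svc_putnl(resv, RPC_SUCCESS);	// convert to network order, append
 */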

/*
 * The context of a single thread, including the request currently being
 * processed.
 */
struct svc_rqst {
	struct list_head	rq_all;		/* all threads list */
	struct rcu_head		rq_rcu_head;	/* for RCU deferred kfree */
	struct svc_xprt *	rq_xprt;	/* transport ptr */

	struct sockaddr_storage	rq_addr;	/* peer address */
	size_t			rq_addrlen;
	struct sockaddr_storage	rq_daddr;	/* dest addr of request
						 *  - reply from here */
	size_t			rq_daddrlen;

	struct svc_serv *	rq_server;	/* RPC service definition */
	struct svc_pool *	rq_pool;	/* thread pool */
	struct svc_procedure *	rq_procinfo;	/* procedure info */
	struct auth_ops *	rq_authop;	/* authentication flavour */
	struct svc_cred		rq_cred;	/* auth info */
	void *			rq_xprt_ctxt;	/* transport specific context ptr */
	struct svc_deferred_req*rq_deferred;	/* deferred request we are replaying */

	size_t			rq_xprt_hlen;	/* xprt header len */
	struct xdr_buf		rq_arg;
	struct xdr_buf		rq_res;
	struct page *		rq_pages[RPCSVC_MAXPAGES];
	struct page *		*rq_respages;	/* points into rq_pages */
	struct page *		*rq_next_page;	/* next reply page to use */
	struct page *		*rq_page_end;	/* one past the last page */

	struct kvec		rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */

	__be32			rq_xid;		/* transaction id */
	u32			rq_prog;	/* program number */
	u32			rq_vers;	/* program version */
	u32			rq_proc;	/* procedure number */
	u32			rq_prot;	/* IP protocol */
	int			rq_cachetype;	/* catering to nfsd */
#define	RQ_SECURE	(0)			/* secure port */
#define	RQ_LOCAL	(1)			/* local request */
#define	RQ_USEDEFERRAL	(2)			/* use deferral */
#define	RQ_DROPME	(3)			/* drop current request */
#define	RQ_SPLICE_OK	(4)			/* turned off in gss privacy
						 * to prevent encrypting page
						 * cache pages */
#define	RQ_VICTIM	(5)			/* about to be shut down */
#define	RQ_BUSY		(6)			/* request is busy */
#define	RQ_DATA		(7)			/* request has data */
	unsigned long		rq_flags;	/* flags field */

	void *			rq_argp;	/* decoded arguments */
	void *			rq_resp;	/* xdr'd results */
	void *			rq_auth_data;	/* flavor-specific data */
	int			rq_auth_slack;	/* extra space xdr code
						 * should leave in head
						 * for krb5i, krb5p */
	int			rq_reserved;	/* space on socket outq
						 * reserved for this request */

	struct cache_req	rq_chandle;	/* handle passed to caches for
						 * request delaying */

	/* Catering to nfsd */
	struct auth_domain *	rq_client;	/* RPC peer info */
	struct auth_domain *	rq_gssclient;	/* "gss/"-style peer info */
	struct svc_cacherep *	rq_cacherep;	/* cache info */
	struct task_struct	*rq_task;	/* service thread */
	spinlock_t		rq_lock;	/* per-request lock */
	struct net		*rq_bc_net;	/* pointer to backchannel's
						 * net namespace */
};

#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)

/*
 * Rigorous type checking on sockaddr type conversions
 */
static inline struct sockaddr_in *svc_addr_in(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in *) &rqst->rq_addr;
}

static inline struct sockaddr_in6 *svc_addr_in6(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in6 *) &rqst->rq_addr;
}

static inline struct sockaddr *svc_addr(const struct svc_rqst *rqst)
{
	return (struct sockaddr *) &rqst->rq_addr;
}

static inline struct sockaddr_in *svc_daddr_in(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in *) &rqst->rq_daddr;
}

static inline struct sockaddr_in6 *svc_daddr_in6(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in6 *) &rqst->rq_daddr;
}

static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
{
	return (struct sockaddr *) &rqst->rq_daddr;
}
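
/*
 * Example (sketch): combined with svc_print_addr() and RPC_MAX_ADDRBUFLEN
 * (both declared below), these helpers give a printable peer address:
 *
 *	char buf[RPC_MAX_ADDRBUFLEN];
 *	printk(KERN_INFO "request from %s\n",
 *	       svc_print_addr(rqstp, buf, sizeof(buf)));
 */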

/*
 * Check buffer bounds after decoding arguments
 */
static inline int
xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
{
	char *cp = (char *)p;
	struct kvec *vec = &rqstp->rq_arg.head[0];
	return cp >= (char*)vec->iov_base
		&& cp <= (char*)vec->iov_base + vec->iov_len;
}

static inline int
xdr_ressize_check(struct svc_rqst *rqstp, __be32 *p)
{
	struct kvec *vec = &rqstp->rq_res.head[0];
	char *cp = (char*)p;

	vec->iov_len = cp - (char*)vec->iov_base;

	return vec->iov_len <= PAGE_SIZE;
}
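
/*
 * Usage sketch (hypothetical "my_*" names): in the kxdrproc_t style used
 * by this header, an argument-decode routine consumes words from the
 * request head and then reports whether it stayed within bounds:
 *
 *	static int my_decode_args(struct svc_rqst *rqstp, __be32 *p,
 *				  struct my_args *argp)
 *	{
 *		argp->cookie = ntohl(*p++);
 *		return xdr_argsize_check(rqstp, p);
 *	}
 *
 * Encode routines end with xdr_ressize_check(rqstp, p) after writing the
 * reply, which also sets rq_res.head[0].iov_len.
 */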

static inline void svc_free_res_pages(struct svc_rqst *rqstp)
{
	while (rqstp->rq_next_page != rqstp->rq_respages) {
		struct page **pp = --rqstp->rq_next_page;
		if (*pp) {
			put_page(*pp);
			*pp = NULL;
		}
	}
}

struct svc_deferred_req {
	u32			prot;	/* protocol (UDP or TCP) */
	struct svc_xprt		*xprt;
	struct sockaddr_storage	addr;	/* where reply must go */
	size_t			addrlen;
	struct sockaddr_storage	daddr;	/* where reply must come from */
	size_t			daddrlen;
	struct cache_deferred_req handle;
	size_t			xprt_hlen;
	int			argslen;
	__be32			args[0];
};

/*
 * List of RPC programs on the same transport endpoint
 */
struct svc_program {
	struct svc_program *	pg_next;	/* other programs (same xprt) */
	u32			pg_prog;	/* program number */
	unsigned int		pg_lovers;	/* lowest version */
	unsigned int		pg_hivers;	/* highest version */
	unsigned int		pg_nvers;	/* number of versions */
	struct svc_version **	pg_vers;	/* version array */
	char *			pg_name;	/* service name */
	char *			pg_class;	/* class name: services sharing authentication */
	struct svc_stat *	pg_stats;	/* rpc statistics */
	int			(*pg_authenticate)(struct svc_rqst *);
};

/*
 * RPC program version
 */
struct svc_version {
	u32			vs_vers;	/* version number */
	u32			vs_nproc;	/* number of procedures */
	struct svc_procedure *	vs_proc;	/* per-procedure info */
	u32			vs_xdrsize;	/* xdrsize needed for this version */

	/* Don't register with rpcbind */
	bool			vs_hidden;

	/* Don't care if the rpcbind registration fails */
	bool			vs_rpcb_optnl;

	/* Need xprt with congestion control */
	bool			vs_need_cong_ctrl;

	/* Override dispatch function (e.g. when caching replies).
	 * A return value of 0 means drop the request.
	 * vs_dispatch == NULL means use the default dispatcher.
	 */
	int			(*vs_dispatch)(struct svc_rqst *, __be32 *);
};

/*
 * RPC procedure info
 */
typedef __be32	(*svc_procfunc)(struct svc_rqst *, void *argp, void *resp);
struct svc_procedure {
	svc_procfunc		pc_func;	/* process the request */
	kxdrproc_t		pc_decode;	/* XDR decode args */
	kxdrproc_t		pc_encode;	/* XDR encode result */
	kxdrproc_t		pc_release;	/* XDR free result */
	unsigned int		pc_argsize;	/* argument struct size */
	unsigned int		pc_ressize;	/* result struct size */
	unsigned int		pc_count;	/* call count */
	unsigned int		pc_cachetype;	/* cache info (NFS) */
	unsigned int		pc_xdrressize;	/* maximum size of XDR reply */
};
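
/*
 * Illustrative sketch (hypothetical "my_*"/"MY_*" names): one entry of a
 * version's procedure table in the svc_procfunc/kxdrproc_t style above:
 *
 *	static struct svc_procedure my_procedures[] = {
 *		[MY_PROC_NULL] = {
 *			.pc_func	= (svc_procfunc) my_proc_null,
 *			.pc_decode	= (kxdrproc_t) my_decode_void,
 *			.pc_encode	= (kxdrproc_t) my_encode_void,
 *			.pc_argsize	= sizeof(struct my_void),
 *			.pc_ressize	= sizeof(struct my_void),
 *			.pc_xdrressize	= 1,
 *		},
 *	};
 */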

/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_AUTO = -1,		/* choose one of the others */
	SVC_POOL_GLOBAL,		/* no mapping, just a single global pool
					 * (legacy & UP mode) */
	SVC_POOL_PERCPU,		/* one pool per cpu */
	SVC_POOL_PERNODE		/* one pool per numa node */
};

struct svc_pool_map {
	int count;			/* how many svc_servs use us */
	int mode;			/* Note: =0 is legal, meaning "global pool" */

	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
};

extern struct svc_pool_map svc_pool_map;

/*
 * Function prototypes.
 */
int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
int svc_bind(struct svc_serv *serv, struct net *net);
struct svc_serv *svc_create(struct svc_program *, unsigned int,
			    struct svc_serv_ops *);
struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
				struct svc_pool *pool, int node);
struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
				    struct svc_pool *pool, int node);
void svc_rqst_free(struct svc_rqst *);
void svc_exit_thread(struct svc_rqst *);
unsigned int svc_pool_map_get(void);
void svc_pool_map_put(void);
struct svc_serv *svc_create_pooled(struct svc_program *, unsigned int,
				   struct svc_serv_ops *);
int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
void svc_destroy(struct svc_serv *);
void svc_shutdown_net(struct svc_serv *, struct net *);
int svc_process(struct svc_rqst *);
int bc_svc_process(struct svc_serv *, struct rpc_rqst *,
		   struct svc_rqst *);
int svc_register(const struct svc_serv *, struct net *, const int,
		 const unsigned short, const unsigned short);

void svc_wake_up(struct svc_serv *);
void svc_reserve(struct svc_rqst *rqstp, int space);
struct svc_pool *svc_pool_for_cpu(struct svc_serv *serv, int cpu);
char *svc_print_addr(struct svc_rqst *, char *, size_t);
unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
				   struct kvec *first, size_t total);
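
/*
 * Typical bring-up sequence (illustrative sketch; error handling omitted,
 * "my_*" names and counts are hypothetical placeholders):
 *
 *	serv = svc_create_pooled(&my_program, my_bufsize, &my_sv_ops);
 *	err  = svc_bind(serv, net);
 *	// ... add listening transports (see svc_xprt.h) ...
 *	err  = svc_set_num_threads(serv, NULL, my_nrthreads);
 *	...
 *	svc_shutdown_net(serv, net);
 *	svc_destroy(serv);
 */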

#define	RPC_MAX_ADDRBUFLEN	(63U)

/*
 * When we want to reduce the size of the reserved space in the response
 * buffer, we need to take into account the size of any checksum data that
 * may be at the end of the message.
 */
static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space)
{
	svc_reserve(rqstp, space + rqstp->rq_auth_slack);
}

#endif /* SUNRPC_SVC_H */