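/*
 * IPVS:	Source Hashing scheduling module
 *
 * The SH scheduler selects a real server by hashing the packet's
 * source IP address (and, optionally, its source port).  Each virtual
 * service keeps a table of IP_VS_SH_TAB_SIZE buckets; every bucket
 * holds a pointer to one destination, and destinations with a greater
 * weight occupy proportionally more buckets, so a given source address
 * keeps mapping to the same destination for as long as the destination
 * set is unchanged.
 *
 * Two optional per-service scheduler flags modify the behaviour:
 *
 *	IP_VS_SVC_F_SCHED_SH_PORT	include the source port in the hash
 *	IP_VS_SVC_F_SCHED_SH_FALLBACK	probe other buckets when the hashed
 *					destination is unavailable
 */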
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

#include <net/ip_vs.h>

#include <net/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>

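/*
 *	IPVS SH bucket: one slot of the per-service hash table, holding an
 *	RCU-managed pointer to the real server assigned to this slot.
 */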
struct ip_vs_sh_bucket {
	struct ip_vs_dest __rcu	*dest;
};

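/*
 *	Size of the SH hash table: 2^CONFIG_IP_VS_SH_TAB_BITS buckets
 *	(256 with the default of 8 bits).
 */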
#ifndef CONFIG_IP_VS_SH_TAB_BITS
#define CONFIG_IP_VS_SH_TAB_BITS	8
#endif
#define IP_VS_SH_TAB_BITS		CONFIG_IP_VS_SH_TAB_BITS
#define IP_VS_SH_TAB_SIZE		(1 << IP_VS_SH_TAB_BITS)
#define IP_VS_SH_TAB_MASK		(IP_VS_SH_TAB_SIZE - 1)

struct ip_vs_sh_state {
	struct rcu_head			rcu_head;
	struct ip_vs_sh_bucket		buckets[IP_VS_SH_TAB_SIZE];
};

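/* Helper function to determine if a server is unavailable: weight of zero
 * or less (quiesced) or flagged as overloaded.
 */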
static inline bool is_unavailable(struct ip_vs_dest *dest)
{
	return atomic_read(&dest->weight) <= 0 ||
		dest->flags & IP_VS_DEST_F_OVERLOAD;
}

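/*
 *	Returns hash value for IPVS SH entry.
 *
 *	The key is a Knuth-style multiplicative hash of the folded address
 *	plus the port, shifted by 'offset' (used only by the fallback path)
 *	and reduced to the table size by masking.  For example, with the
 *	default 8 table bits, IPv4 source 192.0.2.10 (0xc000020a) and port 0
 *	land in bucket ((0xc000020a + 0) * 2654435761UL) & 0xff, and every
 *	packet from that source keeps hitting the same bucket.
 */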
static inline unsigned int
ip_vs_sh_hashkey(int af, const union nf_inet_addr *addr,
		 __be16 port, unsigned int offset)
{
	__be32 addr_fold = addr->ip;

#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		addr_fold = addr->ip6[0]^addr->ip6[1]^
			    addr->ip6[2]^addr->ip6[3];
#endif
	return (offset + (ntohs(port) + ntohl(addr_fold))*2654435761UL) &
		IP_VS_SH_TAB_MASK;
}

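/*
 *	Get the ip_vs_dest associated with the supplied parameters.  Returns
 *	NULL if the hashed bucket is empty or its destination is currently
 *	unavailable.
 */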
static inline struct ip_vs_dest *
ip_vs_sh_get(struct ip_vs_service *svc, struct ip_vs_sh_state *s,
	     const union nf_inet_addr *addr, __be16 port)
{
	unsigned int hash = ip_vs_sh_hashkey(svc->af, addr, port, 0);
	struct ip_vs_dest *dest = rcu_dereference(s->buckets[hash].dest);

	return (!dest || is_unavailable(dest)) ? NULL : dest;
}

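/*
 *	As ip_vs_sh_get, but with a fallback if the selected server is
 *	unavailable.
 *
 *	If the bucket chosen by the initial hash holds an unavailable
 *	destination, the remaining buckets are probed by re-hashing with an
 *	increasing offset (starting from the initial hash value), wrapping
 *	around the table.  The walk is deterministic, so all traffic that
 *	hashes to the same bucket fails over to the same replacement server.
 *	The search stops early at the first empty bucket.
 */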
static inline struct ip_vs_dest *
ip_vs_sh_get_fallback(struct ip_vs_service *svc, struct ip_vs_sh_state *s,
		      const union nf_inet_addr *addr, __be16 port)
{
	unsigned int offset, roffset;
	unsigned int hash, ihash;
	struct ip_vs_dest *dest;

	/* First try the dest it's supposed to go to */
	ihash = ip_vs_sh_hashkey(svc->af, addr, port, 0);
	dest = rcu_dereference(s->buckets[ihash].dest);
	if (!dest)
		return NULL;
	if (!is_unavailable(dest))
		return dest;

	IP_VS_DBG_BUF(6, "SH: selected unavailable server %s:%d, reselecting",
		      IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port));

	/* If the original dest is unavailable, loop around the table
	 * starting from ihash to find a new dest
	 */
	for (offset = 0; offset < IP_VS_SH_TAB_SIZE; offset++) {
		roffset = (offset + ihash) % IP_VS_SH_TAB_SIZE;
		hash = ip_vs_sh_hashkey(svc->af, addr, port, roffset);
		dest = rcu_dereference(s->buckets[hash].dest);
		if (!dest)
			break;
		if (!is_unavailable(dest))
			return dest;
		IP_VS_DBG_BUF(6, "SH: selected unavailable "
			      "server %s:%d (offset %d), reselecting",
			      IP_VS_DBG_ADDR(dest->af, &dest->addr),
			      ntohs(dest->port), roffset);
	}

	return NULL;
}

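/*
 *	Assign all the hash buckets of the specified table with the service.
 *
 *	Destinations are taken from svc->destinations in round-robin order,
 *	and each destination keeps receiving consecutive buckets until its
 *	weight is filled.  E.g. with dest A (weight 2) and dest B (weight 1),
 *	the 256 default buckets are assigned A, A, B, A, A, B, ... so A serves
 *	roughly two thirds of the source addresses.
 */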
static int
ip_vs_sh_reassign(struct ip_vs_sh_state *s, struct ip_vs_service *svc)
{
	int i;
	struct ip_vs_sh_bucket *b;
	struct list_head *p;
	struct ip_vs_dest *dest;
	int d_count;
	bool empty;

	b = &s->buckets[0];
	p = &svc->destinations;
	empty = list_empty(p);
	d_count = 0;
	for (i = 0; i < IP_VS_SH_TAB_SIZE; i++) {
		dest = rcu_dereference_protected(b->dest, 1);
		if (dest)
			ip_vs_dest_put(dest);
		if (empty)
			RCU_INIT_POINTER(b->dest, NULL);
		else {
			if (p == &svc->destinations)
				p = p->next;

			dest = list_entry(p, struct ip_vs_dest, n_list);
			ip_vs_dest_hold(dest);
			RCU_INIT_POINTER(b->dest, dest);

			IP_VS_DBG_BUF(6, "assigned i: %d dest: %s weight: %d\n",
				      i, IP_VS_DBG_ADDR(dest->af, &dest->addr),
				      atomic_read(&dest->weight));

			/* Don't move to next dest until filling weight */
			if (++d_count >= atomic_read(&dest->weight)) {
				p = p->next;
				d_count = 0;
			}
		}
		b++;
	}
	return 0;
}

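/*
 *	Flush all the hash buckets of the specified table: drop the reference
 *	on every assigned destination and clear the bucket pointers.
 */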
static void ip_vs_sh_flush(struct ip_vs_sh_state *s)
{
	int i;
	struct ip_vs_sh_bucket *b;
	struct ip_vs_dest *dest;

	b = &s->buckets[0];
	for (i = 0; i < IP_VS_SH_TAB_SIZE; i++) {
		dest = rcu_dereference_protected(b->dest, 1);
		if (dest) {
			ip_vs_dest_put(dest);
			RCU_INIT_POINTER(b->dest, NULL);
		}
		b++;
	}
}

static int ip_vs_sh_init_svc(struct ip_vs_service *svc)
{
	struct ip_vs_sh_state *s;

	/* allocate the SH table for this service */
	s = kzalloc(sizeof(struct ip_vs_sh_state), GFP_KERNEL);
	if (s == NULL)
		return -ENOMEM;

	svc->sched_data = s;
	IP_VS_DBG(6, "SH hash table (memory=%zdbytes) allocated for "
		  "current service\n",
		  sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE);

	/* assign the hash buckets with current dests */
	ip_vs_sh_reassign(s, svc);

	return 0;
}


static void ip_vs_sh_done_svc(struct ip_vs_service *svc)
{
	struct ip_vs_sh_state *s = svc->sched_data;

	/* got to clean up the hash buckets here */
	ip_vs_sh_flush(s);

	/* release the table itself */
	kfree_rcu(s, rcu_head);
	IP_VS_DBG(6, "SH hash table (memory=%zdbytes) released\n",
		  sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE);
}


static int ip_vs_sh_dest_changed(struct ip_vs_service *svc,
				 struct ip_vs_dest *dest)
{
	struct ip_vs_sh_state *s = svc->sched_data;

	/* assign the hash buckets with the updated service */
	ip_vs_sh_reassign(s, svc);

	return 0;
}

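/*
 *	Helper function to get the port to hash on: the source port of the
 *	packet for TCP, UDP and SCTP (or the destination port when the header
 *	is parsed in the inverse/reply direction), and 0 for any other
 *	protocol.
 */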
static inline __be16
ip_vs_sh_get_port(const struct sk_buff *skb, struct ip_vs_iphdr *iph)
{
	__be16 _ports[2], *ports;

	/* Only protocols carrying a port pair at the start of the transport
	 * header are hashed on the port; everything else effectively hashes
	 * on the address alone.
	 */
	switch (iph->protocol) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_SCTP:
		ports = skb_header_pointer(skb, iph->len, sizeof(_ports),
					   &_ports);
		if (unlikely(!ports))
			return 0;

		if (likely(!ip_vs_iph_inverse(iph)))
			return ports[0];
		else
			return ports[1];
	default:
		return 0;
	}
}

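/*
 *	Source Hashing scheduling: called for each new connection.  The hash
 *	input is the source address (destination address for inverse lookups),
 *	plus the source port when the SH_PORT service flag is set; the
 *	SH_FALLBACK flag selects the probing lookup above.
 */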
static struct ip_vs_dest *
ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
		  struct ip_vs_iphdr *iph)
{
	struct ip_vs_dest *dest;
	struct ip_vs_sh_state *s;
	__be16 port = 0;
	const union nf_inet_addr *hash_addr;

	hash_addr = ip_vs_iph_inverse(iph) ? &iph->daddr : &iph->saddr;

	IP_VS_DBG(6, "ip_vs_sh_schedule(): Scheduling...\n");

	if (svc->flags & IP_VS_SVC_F_SCHED_SH_PORT)
		port = ip_vs_sh_get_port(skb, iph);

	s = (struct ip_vs_sh_state *) svc->sched_data;

	if (svc->flags & IP_VS_SVC_F_SCHED_SH_FALLBACK)
		dest = ip_vs_sh_get_fallback(svc, s, hash_addr, port);
	else
		dest = ip_vs_sh_get(svc, s, hash_addr, port);

	if (!dest) {
		ip_vs_scheduler_err(svc, "no destination available");
		return NULL;
	}

	IP_VS_DBG_BUF(6, "SH: source IP address %s --> server %s:%d\n",
		      IP_VS_DBG_ADDR(svc->af, hash_addr),
		      IP_VS_DBG_ADDR(dest->af, &dest->addr),
		      ntohs(dest->port));

	return dest;
}

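/*
 *	IPVS SH Scheduler structure.  Adding, removing or updating any
 *	destination triggers a full reassignment of the bucket table.
 */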
static struct ip_vs_scheduler ip_vs_sh_scheduler =
{
	.name = "sh",
	.refcnt = ATOMIC_INIT(0),
	.module = THIS_MODULE,
	.n_list = LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list),
	.init_service = ip_vs_sh_init_svc,
	.done_service = ip_vs_sh_done_svc,
	.add_dest = ip_vs_sh_dest_changed,
	.del_dest = ip_vs_sh_dest_changed,
	.upd_dest = ip_vs_sh_dest_changed,
	.schedule = ip_vs_sh_schedule,
};
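/*
 * Usage sketch, assuming an ipvsadm build that understands scheduler flags
 * via -b/--sched-flags (addresses are examples only):
 *
 *	ipvsadm -A -t 192.0.2.1:80 -s sh -b sh-fallback,sh-port
 *	ipvsadm -a -t 192.0.2.1:80 -r 10.0.0.1:80 -g -w 2
 *	ipvsadm -a -t 192.0.2.1:80 -r 10.0.0.2:80 -g -w 1
 *
 * Here 10.0.0.1 is assigned twice as many buckets as 10.0.0.2, and a given
 * client keeps hitting the same real server as long as both stay available.
 */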

static int __init ip_vs_sh_init(void)
{
	return register_ip_vs_scheduler(&ip_vs_sh_scheduler);
}


static void __exit ip_vs_sh_cleanup(void)
{
	unregister_ip_vs_scheduler(&ip_vs_sh_scheduler);
	synchronize_rcu();
}


module_init(ip_vs_sh_init);
module_exit(ip_vs_sh_cleanup);
MODULE_LICENSE("GPL");