1
2
3
4
5
6
7
8
9
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12#include <linux/module.h>
13#include <linux/skbuff.h>
14#include <linux/ip.h>
15#include <linux/gfp.h>
16#include <linux/ipv6.h>
17#include <linux/tcp.h>
18#include <net/dst.h>
19#include <net/flow.h>
20#include <net/ipv6.h>
21#include <net/route.h>
22#include <net/tcp.h>
23
24#include <linux/netfilter_ipv4/ip_tables.h>
25#include <linux/netfilter_ipv6/ip6_tables.h>
26#include <linux/netfilter/x_tables.h>
27#include <linux/netfilter/xt_tcpudp.h>
28#include <linux/netfilter/xt_TCPMSS.h>
29
30MODULE_LICENSE("GPL");
31MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
32MODULE_DESCRIPTION("Xtables: TCP Maximum Segment Size (MSS) adjustment");
33MODULE_ALIAS("ipt_TCPMSS");
34MODULE_ALIAS("ip6t_TCPMSS");
35
36static inline unsigned int
37optlen(const u_int8_t *opt, unsigned int offset)
38{
39
40 if (opt[offset] <= TCPOPT_NOP || opt[offset+1] == 0)
41 return 1;
42 else
43 return opt[offset+1];
44}
45
46static u_int32_t tcpmss_reverse_mtu(struct net *net,
47 const struct sk_buff *skb,
48 unsigned int family)
49{
50 struct flowi fl;
51 struct rtable *rt = NULL;
52 u_int32_t mtu = ~0U;
53
54 if (family == PF_INET) {
55 struct flowi4 *fl4 = &fl.u.ip4;
56 memset(fl4, 0, sizeof(*fl4));
57 fl4->daddr = ip_hdr(skb)->saddr;
58 } else {
59 struct flowi6 *fl6 = &fl.u.ip6;
60
61 memset(fl6, 0, sizeof(*fl6));
62 fl6->daddr = ipv6_hdr(skb)->saddr;
63 }
64
65 nf_route(net, (struct dst_entry **)&rt, &fl, false, family);
66 if (rt != NULL) {
67 mtu = dst_mtu(&rt->dst);
68 dst_release(&rt->dst);
69 }
70 return mtu;
71}
72
/*
 * Clamp an existing TCP MSS option, or insert one if absent.
 *
 * @tcphoff: offset of the TCP header from the network header.
 * @minlen:  IP + TCP header overhead subtracted from the path MTU when
 *           clamping to PMTU.
 *
 * Returns the number of bytes added to the packet (TCPOLEN_MSS) when an
 * option was inserted, 0 when the packet was left alone or modified in
 * place, or -1 on a malformed packet (callers translate this to NF_DROP).
 */
static int
tcpmss_mangle_packet(struct sk_buff *skb,
		     const struct xt_action_param *par,
		     unsigned int family,
		     unsigned int tcphoff,
		     unsigned int minlen)
{
	const struct xt_tcpmss_info *info = par->targinfo;
	struct tcphdr *tcph;
	int len, tcp_hdrlen;
	unsigned int i;
	__be16 oldval;
	u16 newmss;
	u8 *opt;

	/* This is a non-first fragment: no TCP header is available. */
	if (par->fragoff != 0)
		return 0;

	if (!skb_make_writable(skb, skb->len))
		return -1;

	len = skb->len - tcphoff;
	if (len < (int)sizeof(struct tcphdr))
		return -1;

	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
	tcp_hdrlen = tcph->doff * 4;

	/* Reject truncated headers and a bogus data offset; otherwise the
	 * option walk below could run past the packet.
	 */
	if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
		return -1;

	if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
		struct net *net = xt_net(par);
		unsigned int in_mtu = tcpmss_reverse_mtu(net, skb, family);
		unsigned int min_mtu = min(dst_mtu(skb_dst(skb)), in_mtu);

		if (min_mtu <= minlen) {
			net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
					    min_mtu);
			return -1;
		}
		/* MSS = smaller of forward/reverse path MTU minus headers. */
		newmss = min_mtu - minlen;
	} else
		newmss = info->mss;

	opt = (u_int8_t *)tcph;
	for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
		if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
			u_int16_t oldmss;

			oldmss = (opt[i+2] << 8) | opt[i+3];

			/* Never increase the MSS: a larger value than the
			 * peer originally advertised could exceed what it
			 * can handle.
			 */
			if (oldmss <= newmss)
				return 0;

			opt[i+2] = (newmss & 0xff00) >> 8;
			opt[i+3] = newmss & 0x00ff;

			inet_proto_csum_replace2(&tcph->check, skb,
						 htons(oldmss), htons(newmss),
						 false);
			return 0;
		}
	}

	/* There is data after the header, so the option cannot be inserted
	 * without moving it, and doing so may make the packet too large.
	 * Accept the packet unmodified instead.
	 */
	if (len > tcp_hdrlen)
		return 0;

	/* tcph->doff has 4 bits; adding TCPOLEN_MSS/4 must not wrap it to 0. */
	if (tcp_hdrlen >= 15 * 4)
		return 0;

	/* No MSS option found: insert one right after the fixed header,
	 * growing the skb by TCPOLEN_MSS bytes.
	 */
	if (skb_tailroom(skb) < TCPOLEN_MSS) {
		if (pskb_expand_head(skb, 0,
				     TCPOLEN_MSS - skb_tailroom(skb),
				     GFP_ATOMIC))
			return -1;
		/* expansion may have relocated the header data */
		tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
	}

	skb_put(skb, TCPOLEN_MSS);

	/* A peer that received no MSS option assumes a default: 536 for
	 * IPv4 (RFC 1122) and 1220 for IPv6 (from the 1280-byte minimum
	 * MTU, RFC 2460).  Never insert an option larger than that
	 * assumption.
	 */
	if (xt_family(par) == NFPROTO_IPV4)
		newmss = min(newmss, (u16)536);
	else
		newmss = min(newmss, (u16)1220);

	opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
	/* shift any existing options to make room for the new one */
	memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));

	/* pseudo-header length changed: fold the new segment length in */
	inet_proto_csum_replace2(&tcph->check, skb,
				 htons(len), htons(len + TCPOLEN_MSS), true);
	opt[0] = TCPOPT_MSS;
	opt[1] = TCPOLEN_MSS;
	opt[2] = (newmss & 0xff00) >> 8;
	opt[3] = newmss & 0x00ff;

	inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), false);

	/* Word 6 of the TCP header (bytes 12-13) holds doff and the flag
	 * bits; fold the doff change into the checksum as well.
	 */
	oldval = ((__be16 *)tcph)[6];
	tcph->doff += TCPOLEN_MSS/4;
	inet_proto_csum_replace2(&tcph->check, skb,
				 oldval, ((__be16 *)tcph)[6], false);
	return TCPOLEN_MSS;
}
197
198static unsigned int
199tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
200{
201 struct iphdr *iph = ip_hdr(skb);
202 __be16 newlen;
203 int ret;
204
205 ret = tcpmss_mangle_packet(skb, par,
206 PF_INET,
207 iph->ihl * 4,
208 sizeof(*iph) + sizeof(struct tcphdr));
209 if (ret < 0)
210 return NF_DROP;
211 if (ret > 0) {
212 iph = ip_hdr(skb);
213 newlen = htons(ntohs(iph->tot_len) + ret);
214 csum_replace2(&iph->check, iph->tot_len, newlen);
215 iph->tot_len = newlen;
216 }
217 return XT_CONTINUE;
218}
219
220#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
221static unsigned int
222tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
223{
224 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
225 u8 nexthdr;
226 __be16 frag_off, oldlen, newlen;
227 int tcphoff;
228 int ret;
229
230 nexthdr = ipv6h->nexthdr;
231 tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
232 if (tcphoff < 0)
233 return NF_DROP;
234 ret = tcpmss_mangle_packet(skb, par,
235 PF_INET6,
236 tcphoff,
237 sizeof(*ipv6h) + sizeof(struct tcphdr));
238 if (ret < 0)
239 return NF_DROP;
240 if (ret > 0) {
241 ipv6h = ipv6_hdr(skb);
242 oldlen = ipv6h->payload_len;
243 newlen = htons(ntohs(oldlen) + ret);
244 if (skb->ip_summed == CHECKSUM_COMPLETE)
245 skb->csum = csum_add(csum_sub(skb->csum, oldlen),
246 newlen);
247 ipv6h->payload_len = newlen;
248 }
249 return XT_CONTINUE;
250}
251#endif
252
253
254static inline bool find_syn_match(const struct xt_entry_match *m)
255{
256 const struct xt_tcp *tcpinfo = (const struct xt_tcp *)m->data;
257
258 if (strcmp(m->u.kernel.match->name, "tcp") == 0 &&
259 tcpinfo->flg_cmp & TCPHDR_SYN &&
260 !(tcpinfo->invflags & XT_TCP_INV_FLAGS))
261 return true;
262
263 return false;
264}
265
266static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
267{
268 const struct xt_tcpmss_info *info = par->targinfo;
269 const struct ipt_entry *e = par->entryinfo;
270 const struct xt_entry_match *ematch;
271
272 if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
273 (par->hook_mask & ~((1 << NF_INET_FORWARD) |
274 (1 << NF_INET_LOCAL_OUT) |
275 (1 << NF_INET_POST_ROUTING))) != 0) {
276 pr_info_ratelimited("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n");
277 return -EINVAL;
278 }
279 if (par->nft_compat)
280 return 0;
281
282 xt_ematch_foreach(ematch, e)
283 if (find_syn_match(ematch))
284 return 0;
285 pr_info_ratelimited("Only works on TCP SYN packets\n");
286 return -EINVAL;
287}
288
289#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
290static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
291{
292 const struct xt_tcpmss_info *info = par->targinfo;
293 const struct ip6t_entry *e = par->entryinfo;
294 const struct xt_entry_match *ematch;
295
296 if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
297 (par->hook_mask & ~((1 << NF_INET_FORWARD) |
298 (1 << NF_INET_LOCAL_OUT) |
299 (1 << NF_INET_POST_ROUTING))) != 0) {
300 pr_info_ratelimited("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n");
301 return -EINVAL;
302 }
303 if (par->nft_compat)
304 return 0;
305
306 xt_ematch_foreach(ematch, e)
307 if (find_syn_match(ematch))
308 return 0;
309 pr_info_ratelimited("Only works on TCP SYN packets\n");
310 return -EINVAL;
311}
312#endif
313
/* One registration per address family, both under the name "TCPMSS". */
static struct xt_target tcpmss_tg_reg[] __read_mostly = {
	{
		.family		= NFPROTO_IPV4,
		.name		= "TCPMSS",
		.checkentry	= tcpmss_tg4_check,
		.target		= tcpmss_tg4,
		.targetsize	= sizeof(struct xt_tcpmss_info),
		.proto		= IPPROTO_TCP,
		.me		= THIS_MODULE,
	},
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
	{
		.family		= NFPROTO_IPV6,
		.name		= "TCPMSS",
		.checkentry	= tcpmss_tg6_check,
		.target		= tcpmss_tg6,
		.targetsize	= sizeof(struct xt_tcpmss_info),
		.proto		= IPPROTO_TCP,
		.me		= THIS_MODULE,
	},
#endif
};
336
/* Module init: register the IPv4 (and, if built, IPv6) targets. */
static int __init tcpmss_tg_init(void)
{
	return xt_register_targets(tcpmss_tg_reg, ARRAY_SIZE(tcpmss_tg_reg));
}
341
/* Module exit: unregister everything registered in tcpmss_tg_init(). */
static void __exit tcpmss_tg_exit(void)
{
	xt_unregister_targets(tcpmss_tg_reg, ARRAY_SIZE(tcpmss_tg_reg));
}
346
/* Wire up module entry/exit points. */
module_init(tcpmss_tg_init);
module_exit(tcpmss_tg_exit);
349