1
2
3
4
5
6
7
8
9
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11#include <linux/module.h>
12#include <linux/skbuff.h>
13#include <linux/ip.h>
14#include <linux/gfp.h>
15#include <linux/ipv6.h>
16#include <linux/tcp.h>
17#include <net/dst.h>
18#include <net/flow.h>
19#include <net/ipv6.h>
20#include <net/route.h>
21#include <net/tcp.h>
22
23#include <linux/netfilter_ipv4/ip_tables.h>
24#include <linux/netfilter_ipv6/ip6_tables.h>
25#include <linux/netfilter/x_tables.h>
26#include <linux/netfilter/xt_tcpudp.h>
27#include <linux/netfilter/xt_TCPMSS.h>
28
29MODULE_LICENSE("GPL");
30MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
31MODULE_DESCRIPTION("Xtables: TCP Maximum Segment Size (MSS) adjustment");
32MODULE_ALIAS("ipt_TCPMSS");
33MODULE_ALIAS("ip6t_TCPMSS");
34
35static inline unsigned int
36optlen(const u_int8_t *opt, unsigned int offset)
37{
38
39 if (opt[offset] <= TCPOPT_NOP || opt[offset+1] == 0)
40 return 1;
41 else
42 return opt[offset+1];
43}
44
45static int
46tcpmss_mangle_packet(struct sk_buff *skb,
47 const struct xt_tcpmss_info *info,
48 unsigned int in_mtu,
49 unsigned int tcphoff,
50 unsigned int minlen)
51{
52 struct tcphdr *tcph;
53 unsigned int tcplen, i;
54 __be16 oldval;
55 u16 newmss;
56 u8 *opt;
57
58 if (!skb_make_writable(skb, skb->len))
59 return -1;
60
61 tcplen = skb->len - tcphoff;
62 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
63
64
65 if (tcplen < tcph->doff*4)
66 return -1;
67
68 if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
69 if (dst_mtu(skb_dst(skb)) <= minlen) {
70 if (net_ratelimit())
71 pr_err("unknown or invalid path-MTU (%u)\n",
72 dst_mtu(skb_dst(skb)));
73 return -1;
74 }
75 if (in_mtu <= minlen) {
76 if (net_ratelimit())
77 pr_err("unknown or invalid path-MTU (%u)\n",
78 in_mtu);
79 return -1;
80 }
81 newmss = min(dst_mtu(skb_dst(skb)), in_mtu) - minlen;
82 } else
83 newmss = info->mss;
84
85 opt = (u_int8_t *)tcph;
86 for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
87 if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
88 opt[i+1] == TCPOLEN_MSS) {
89 u_int16_t oldmss;
90
91 oldmss = (opt[i+2] << 8) | opt[i+3];
92
93
94
95
96
97 if (oldmss <= newmss)
98 return 0;
99
100 opt[i+2] = (newmss & 0xff00) >> 8;
101 opt[i+3] = newmss & 0x00ff;
102
103 inet_proto_csum_replace2(&tcph->check, skb,
104 htons(oldmss), htons(newmss),
105 0);
106 return 0;
107 }
108 }
109
110
111
112
113 if (tcplen > tcph->doff*4)
114 return 0;
115
116
117
118
119 if (skb_tailroom(skb) < TCPOLEN_MSS) {
120 if (pskb_expand_head(skb, 0,
121 TCPOLEN_MSS - skb_tailroom(skb),
122 GFP_ATOMIC))
123 return -1;
124 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
125 }
126
127 skb_put(skb, TCPOLEN_MSS);
128
129 opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
130 memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
131
132 inet_proto_csum_replace2(&tcph->check, skb,
133 htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
134 opt[0] = TCPOPT_MSS;
135 opt[1] = TCPOLEN_MSS;
136 opt[2] = (newmss & 0xff00) >> 8;
137 opt[3] = newmss & 0x00ff;
138
139 inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), 0);
140
141 oldval = ((__be16 *)tcph)[6];
142 tcph->doff += TCPOLEN_MSS/4;
143 inet_proto_csum_replace2(&tcph->check, skb,
144 oldval, ((__be16 *)tcph)[6], 0);
145 return TCPOLEN_MSS;
146}
147
148static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
149 unsigned int family)
150{
151 struct flowi fl = {};
152 const struct nf_afinfo *ai;
153 struct rtable *rt = NULL;
154 u_int32_t mtu = ~0U;
155
156 if (family == PF_INET)
157 fl.fl4_dst = ip_hdr(skb)->saddr;
158 else
159 fl.fl6_dst = ipv6_hdr(skb)->saddr;
160
161 rcu_read_lock();
162 ai = nf_get_afinfo(family);
163 if (ai != NULL)
164 ai->route((struct dst_entry **)&rt, &fl);
165 rcu_read_unlock();
166
167 if (rt != NULL) {
168 mtu = dst_mtu(&rt->dst);
169 dst_release(&rt->dst);
170 }
171 return mtu;
172}
173
174static unsigned int
175tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
176{
177 struct iphdr *iph = ip_hdr(skb);
178 __be16 newlen;
179 int ret;
180
181 ret = tcpmss_mangle_packet(skb, par->targinfo,
182 tcpmss_reverse_mtu(skb, PF_INET),
183 iph->ihl * 4,
184 sizeof(*iph) + sizeof(struct tcphdr));
185 if (ret < 0)
186 return NF_DROP;
187 if (ret > 0) {
188 iph = ip_hdr(skb);
189 newlen = htons(ntohs(iph->tot_len) + ret);
190 csum_replace2(&iph->check, iph->tot_len, newlen);
191 iph->tot_len = newlen;
192 }
193 return XT_CONTINUE;
194}
195
196#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
197static unsigned int
198tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
199{
200 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
201 u8 nexthdr;
202 int tcphoff;
203 int ret;
204
205 nexthdr = ipv6h->nexthdr;
206 tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr);
207 if (tcphoff < 0)
208 return NF_DROP;
209 ret = tcpmss_mangle_packet(skb, par->targinfo,
210 tcpmss_reverse_mtu(skb, PF_INET6),
211 tcphoff,
212 sizeof(*ipv6h) + sizeof(struct tcphdr));
213 if (ret < 0)
214 return NF_DROP;
215 if (ret > 0) {
216 ipv6h = ipv6_hdr(skb);
217 ipv6h->payload_len = htons(ntohs(ipv6h->payload_len) + ret);
218 }
219 return XT_CONTINUE;
220}
221#endif
222
223
224static inline bool find_syn_match(const struct xt_entry_match *m)
225{
226 const struct xt_tcp *tcpinfo = (const struct xt_tcp *)m->data;
227
228 if (strcmp(m->u.kernel.match->name, "tcp") == 0 &&
229 tcpinfo->flg_cmp & TCPHDR_SYN &&
230 !(tcpinfo->invflags & XT_TCP_INV_FLAGS))
231 return true;
232
233 return false;
234}
235
236static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
237{
238 const struct xt_tcpmss_info *info = par->targinfo;
239 const struct ipt_entry *e = par->entryinfo;
240 const struct xt_entry_match *ematch;
241
242 if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
243 (par->hook_mask & ~((1 << NF_INET_FORWARD) |
244 (1 << NF_INET_LOCAL_OUT) |
245 (1 << NF_INET_POST_ROUTING))) != 0) {
246 pr_info("path-MTU clamping only supported in "
247 "FORWARD, OUTPUT and POSTROUTING hooks\n");
248 return -EINVAL;
249 }
250 xt_ematch_foreach(ematch, e)
251 if (find_syn_match(ematch))
252 return 0;
253 pr_info("Only works on TCP SYN packets\n");
254 return -EINVAL;
255}
256
257#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
258static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
259{
260 const struct xt_tcpmss_info *info = par->targinfo;
261 const struct ip6t_entry *e = par->entryinfo;
262 const struct xt_entry_match *ematch;
263
264 if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
265 (par->hook_mask & ~((1 << NF_INET_FORWARD) |
266 (1 << NF_INET_LOCAL_OUT) |
267 (1 << NF_INET_POST_ROUTING))) != 0) {
268 pr_info("path-MTU clamping only supported in "
269 "FORWARD, OUTPUT and POSTROUTING hooks\n");
270 return -EINVAL;
271 }
272 xt_ematch_foreach(ematch, e)
273 if (find_syn_match(ematch))
274 return 0;
275 pr_info("Only works on TCP SYN packets\n");
276 return -EINVAL;
277}
278#endif
279
/* Target registrations: IPv4 always, IPv6 only when ip6_tables is built. */
static struct xt_target tcpmss_tg_reg[] __read_mostly = {
	{
		.family = NFPROTO_IPV4,
		.name = "TCPMSS",
		.checkentry = tcpmss_tg4_check,
		.target = tcpmss_tg4,
		.targetsize = sizeof(struct xt_tcpmss_info),
		.proto = IPPROTO_TCP, /* x_tables pre-filters to TCP */
		.me = THIS_MODULE,
	},
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
	{
		.family = NFPROTO_IPV6,
		.name = "TCPMSS",
		.checkentry = tcpmss_tg6_check,
		.target = tcpmss_tg6,
		.targetsize = sizeof(struct xt_tcpmss_info),
		.proto = IPPROTO_TCP,
		.me = THIS_MODULE,
	},
#endif
};
302
/* Register all TCPMSS targets with x_tables at module load. */
static int __init tcpmss_tg_init(void)
{
	return xt_register_targets(tcpmss_tg_reg, ARRAY_SIZE(tcpmss_tg_reg));
}
307
/* Unregister the targets on module unload. */
static void __exit tcpmss_tg_exit(void)
{
	xt_unregister_targets(tcpmss_tg_reg, ARRAY_SIZE(tcpmss_tg_reg));
}
312
313module_init(tcpmss_tg_init);
314module_exit(tcpmss_tg_exit);
315