/*
 * This is a module which is used for setting the MSS option in TCP packets.
 *
 * Copyright (C) 2000 Marc Boucher <marc@mbsi.ca>
 * Copyright (C) 2007 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/gfp.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/ipv6.h>
#include <net/route.h>
#include <net/tcp.h>

#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_tcpudp.h>
#include <linux/netfilter/xt_TCPMSS.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
MODULE_DESCRIPTION("Xtables: TCP Maximum Segment Size (MSS) adjustment");
MODULE_ALIAS("ipt_TCPMSS");
MODULE_ALIAS("ip6t_TCPMSS");
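
/* Illustrative usage from userspace (hedged example; the option names
 * belong to the standard iptables TCPMSS extension, not to this file):
 *
 *   iptables  -t mangle -A FORWARD -p tcp --tcp-flags SYN,RST SYN \
 *             -j TCPMSS --clamp-mss-to-pmtu
 *   ip6tables -t mangle -A FORWARD -p tcp --tcp-flags SYN,RST SYN \
 *             -j TCPMSS --set-mss 1400
 */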

static inline unsigned int
optlen(const u_int8_t *opt, unsigned int offset)
{
	/* Beware zero-length options: make finite progress */
	if (opt[offset] <= TCPOPT_NOP || opt[offset+1] == 0)
		return 1;
	else
		return opt[offset+1];
}
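
/* Worked example (illustrative, not from the original source): for the
 * option bytes { 0x02, 0x04, 0x05, 0xb4 } (kind = TCPOPT_MSS, length 4,
 * value 1460), optlen(opt, i) returns opt[i+1] == 4 and the option scan
 * skips the whole option.  EOL (0) and NOP (1) carry no length octet,
 * and a corrupt zero length octet would never advance the loop, so both
 * cases return 1 to guarantee forward progress.
 */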

static int
tcpmss_mangle_packet(struct sk_buff *skb,
		     const struct xt_tcpmss_info *info,
		     unsigned int in_mtu,
		     unsigned int tcphoff,
		     unsigned int minlen)
{
	struct tcphdr *tcph;
	unsigned int tcplen, i;
	__be16 oldval;
	u16 newmss;
	u8 *opt;

	if (!skb_make_writable(skb, skb->len))
		return -1;

	tcplen = skb->len - tcphoff;
	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);

	/* Header cannot be larger than the packet */
	if (tcplen < tcph->doff*4)
		return -1;

	if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
		if (dst_mtu(skb_dst(skb)) <= minlen) {
			net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
					    dst_mtu(skb_dst(skb)));
			return -1;
		}
		if (in_mtu <= minlen) {
			net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
					    in_mtu);
			return -1;
		}
		newmss = min(dst_mtu(skb_dst(skb)), in_mtu) - minlen;
	} else
		newmss = info->mss;

	opt = (u_int8_t *)tcph;
	for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
		if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
		    opt[i+1] == TCPOLEN_MSS) {
			u_int16_t oldmss;

			oldmss = (opt[i+2] << 8) | opt[i+3];

			/* Never increase MSS, even when setting it, as
			 * doing so results in problems for hosts that rely
			 * on MSS being set correctly.
			 */
			if (oldmss <= newmss)
				return 0;

			opt[i+2] = (newmss & 0xff00) >> 8;
			opt[i+3] = newmss & 0x00ff;

			inet_proto_csum_replace2(&tcph->check, skb,
						 htons(oldmss), htons(newmss),
						 0);
			return 0;
		}
	}

	/* There is data after the header so the option can't be added
	 * without moving it, and doing so may make the SYN packet
	 * itself too large. Accept the packet unmodified instead.
	 */
	if (tcplen > tcph->doff*4)
		return 0;

	/*
	 * MSS Option not found ?! add it..
	 */
	if (skb_tailroom(skb) < TCPOLEN_MSS) {
		if (pskb_expand_head(skb, 0,
				     TCPOLEN_MSS - skb_tailroom(skb),
				     GFP_ATOMIC))
			return -1;
		tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
	}

	skb_put(skb, TCPOLEN_MSS);

	opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
	memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));

	inet_proto_csum_replace2(&tcph->check, skb,
				 htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
	opt[0] = TCPOPT_MSS;
	opt[1] = TCPOLEN_MSS;
	opt[2] = (newmss & 0xff00) >> 8;
	opt[3] = newmss & 0x00ff;

	inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), 0);

	/* 16-bit word 6 of the TCP header holds doff together with the
	 * flags; fix the checksum for the data-offset change as well.
	 */
	oldval = ((__be16 *)tcph)[6];
	tcph->doff += TCPOLEN_MSS/4;
	inet_proto_csum_replace2(&tcph->check, skb,
				 oldval, ((__be16 *)tcph)[6], 0);
	return TCPOLEN_MSS;
}
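
/* Worked example (illustrative): clamping to PMTU on IPv4 with a
 * 1500-byte MTU on both the forward and reverse routes, and
 * minlen = sizeof(struct iphdr) + sizeof(struct tcphdr) = 40, gives
 * newmss = min(1500, 1500) - 40 = 1460.  A SYN already advertising an
 * MSS of 1460 or less passes through unmodified.
 */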

/* Look up the MTU of the route back towards the packet's source, so
 * that the clamp accounts for the return path as well as the forward
 * one.
 */
static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
				    unsigned int family)
{
	struct flowi fl;
	const struct nf_afinfo *ai;
	struct rtable *rt = NULL;
	u_int32_t mtu = ~0U;

	if (family == PF_INET) {
		struct flowi4 *fl4 = &fl.u.ip4;

		memset(fl4, 0, sizeof(*fl4));
		fl4->daddr = ip_hdr(skb)->saddr;
	} else {
		struct flowi6 *fl6 = &fl.u.ip6;

		memset(fl6, 0, sizeof(*fl6));
		fl6->daddr = ipv6_hdr(skb)->saddr;
	}
	rcu_read_lock();
	ai = nf_get_afinfo(family);
	if (ai != NULL)
		ai->route(&init_net, (struct dst_entry **)&rt, &fl, false);
	rcu_read_unlock();

	if (rt != NULL) {
		mtu = dst_mtu(&rt->dst);
		dst_release(&rt->dst);
	}
	return mtu;
}
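
/* Illustrative behaviour (hypothetical address): for a SYN received
 * from 198.51.100.2 this performs a route lookup with daddr set to
 * 198.51.100.2 and returns that route's MTU.  If no route is found,
 * the ~0U fallback makes min(dst_mtu(skb_dst(skb)), in_mtu) in the
 * mangler depend on the forward path alone.
 */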

static unsigned int
tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
	struct iphdr *iph = ip_hdr(skb);
	__be16 newlen;
	int ret;

	ret = tcpmss_mangle_packet(skb, par->targinfo,
				   tcpmss_reverse_mtu(skb, PF_INET),
				   iph->ihl * 4,
				   sizeof(*iph) + sizeof(struct tcphdr));
	if (ret < 0)
		return NF_DROP;
	if (ret > 0) {
		iph = ip_hdr(skb);
		newlen = htons(ntohs(iph->tot_len) + ret);
		csum_replace2(&iph->check, iph->tot_len, newlen);
		iph->tot_len = newlen;
	}
	return XT_CONTINUE;
}
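
/* Worked example (illustrative): if the mangler appended a 4-byte MSS
 * option (ret == TCPOLEN_MSS), a bare 40-byte SYN grows to 44 bytes;
 * tot_len is bumped accordingly and the IPv4 header checksum is patched
 * incrementally with csum_replace2() instead of being recomputed.
 */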

#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static unsigned int
tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	u8 nexthdr;
	__be16 frag_off;
	int tcphoff;
	int ret;

	nexthdr = ipv6h->nexthdr;
	tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
	if (tcphoff < 0)
		return NF_DROP;
	ret = tcpmss_mangle_packet(skb, par->targinfo,
				   tcpmss_reverse_mtu(skb, PF_INET6),
				   tcphoff,
				   sizeof(*ipv6h) + sizeof(struct tcphdr));
	if (ret < 0)
		return NF_DROP;
	if (ret > 0) {
		ipv6h = ipv6_hdr(skb);
		ipv6h->payload_len = htons(ntohs(ipv6h->payload_len) + ret);
	}
	return XT_CONTINUE;
}
#endif

/* Must specify -p tcp --syn */
static inline bool find_syn_match(const struct xt_entry_match *m)
{
	const struct xt_tcp *tcpinfo = (const struct xt_tcp *)m->data;

	if (strcmp(m->u.kernel.match->name, "tcp") == 0 &&
	    tcpinfo->flg_cmp & TCPHDR_SYN &&
	    !(tcpinfo->invflags & XT_TCP_INV_FLAGS))
		return true;

	return false;
}
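
/* Example of a rule fragment this check accepts (illustrative):
 *   -p tcp --syn    (loads the "tcp" match with SYN set in flg_cmp)
 * whereas "-p tcp" alone, or an inverted "! --syn", is rejected at
 * checkentry time below.
 */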

static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
{
	const struct xt_tcpmss_info *info = par->targinfo;
	const struct ipt_entry *e = par->entryinfo;
	const struct xt_entry_match *ematch;

	if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
	    (par->hook_mask & ~((1 << NF_INET_FORWARD) |
				(1 << NF_INET_LOCAL_OUT) |
				(1 << NF_INET_POST_ROUTING))) != 0) {
		pr_info("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n");
		return -EINVAL;
	}
	xt_ematch_foreach(ematch, e)
		if (find_syn_match(ematch))
			return 0;
	pr_info("Only works on TCP SYN packets\n");
	return -EINVAL;
}

#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
{
	const struct xt_tcpmss_info *info = par->targinfo;
	const struct ip6t_entry *e = par->entryinfo;
	const struct xt_entry_match *ematch;

	if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
	    (par->hook_mask & ~((1 << NF_INET_FORWARD) |
				(1 << NF_INET_LOCAL_OUT) |
				(1 << NF_INET_POST_ROUTING))) != 0) {
		pr_info("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n");
		return -EINVAL;
	}
	xt_ematch_foreach(ematch, e)
		if (find_syn_match(ematch))
			return 0;
	pr_info("Only works on TCP SYN packets\n");
	return -EINVAL;
}
#endif

static struct xt_target tcpmss_tg_reg[] __read_mostly = {
	{
		.family		= NFPROTO_IPV4,
		.name		= "TCPMSS",
		.checkentry	= tcpmss_tg4_check,
		.target		= tcpmss_tg4,
		.targetsize	= sizeof(struct xt_tcpmss_info),
		.proto		= IPPROTO_TCP,
		.me		= THIS_MODULE,
	},
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
	{
		.family		= NFPROTO_IPV6,
		.name		= "TCPMSS",
		.checkentry	= tcpmss_tg6_check,
		.target		= tcpmss_tg6,
		.targetsize	= sizeof(struct xt_tcpmss_info),
		.proto		= IPPROTO_TCP,
		.me		= THIS_MODULE,
	},
#endif
};

static int __init tcpmss_tg_init(void)
{
	return xt_register_targets(tcpmss_tg_reg, ARRAY_SIZE(tcpmss_tg_reg));
}

static void __exit tcpmss_tg_exit(void)
{
	xt_unregister_targets(tcpmss_tg_reg, ARRAY_SIZE(tcpmss_tg_reg));
}

module_init(tcpmss_tg_init);
module_exit(tcpmss_tg_exit);