#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>

#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>

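/* The VLAN tag of an accelerated frame lives in skb metadata rather than in
 * the packet data (skb_vlan_tag_present()).  Rebuild a vlan_ethhdr on the
 * stack from skb->vlan_proto and the tag so that link-layer reads see the
 * frame as if the tag were still in line; offsets beyond the VLAN header are
 * shifted back and read straight from the packet.
 */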
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 vlan_len, *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;

	vlanh = (u8 *) &veth;
	if (offset < ETH_HLEN) {
		u8 ethlen = min_t(u8, len, ETH_HLEN - offset);

		if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
			return false;

		veth.h_vlan_proto = skb->vlan_proto;

		memcpy(dst_u8, vlanh + offset, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN;
	} else if (offset >= VLAN_ETH_HLEN) {
		offset -= VLAN_HLEN;
		goto skip;
	}

	veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth.h_vlan_encapsulated_proto = skb->protocol;

	vlanh += offset;

	vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset);
	memcpy(dst_u8, vlanh, vlan_len);

	len -= vlan_len;
	if (!len)
		return true;

	dst_u8 += vlan_len;
 skip:
	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}

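/* Load priv->len bytes from the chosen header base into the destination
 * register.  The trailing register word is zeroed first so that short loads
 * are zero padded; a missing header ends rule evaluation with NFT_BREAK.
 */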
static void nft_payload_eval(const struct nft_expr *expr,
			     struct nft_regs *regs,
			     const struct nft_pktinfo *pkt)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	dest[priv->len / NFT_REG32_SIZE] = 0;
	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;

		if (skb_vlan_tag_present(skb)) {
			if (!nft_payload_copy_vlan(dest, skb,
						   priv->offset, priv->len))
				goto err;
			return;
		}
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_LEN]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_FLAGS]	= { .type = NLA_U32 },
};

static int nft_payload_init(const struct nft_ctx *ctx,
			    const struct nft_expr *expr,
			    const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
	priv->dreg   = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);

	return nft_validate_register_store(ctx, priv->dreg, NULL,
					   NFT_DATA_VALUE, priv->len);
}

static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

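/* Hardware offload: translate payload matches at well-known header field
 * offsets into flow dissector keys; any other offset or length cannot be
 * expressed and is rejected with -EOPNOTSUPP.
 */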
static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ethhdr, h_source):
		if (priv->len != ETH_ALEN)
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  src, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_dest):
		if (priv->len != ETH_ALEN)
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  dst, ETH_ALEN, reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct iphdr, saddr):
		if (priv->len != sizeof(struct in_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
				  sizeof(struct in_addr), reg);
		break;
	case offsetof(struct iphdr, daddr):
		if (priv->len != sizeof(struct in_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
				  sizeof(struct in_addr), reg);
		break;
	case offsetof(struct iphdr, protocol):
		if (priv->len != sizeof(__u8))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ipv6hdr, saddr):
		if (priv->len != sizeof(struct in6_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
				  sizeof(struct in6_addr), reg);
		break;
	case offsetof(struct ipv6hdr, daddr):
		if (priv->len != sizeof(struct in6_addr))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
				  sizeof(struct in6_addr), reg);
		break;
	case offsetof(struct ipv6hdr, nexthdr):
		if (priv->len != sizeof(__u8))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.l3num) {
	case htons(ETH_P_IP):
		err = nft_payload_offload_ip(ctx, flow, priv);
		break;
	case htons(ETH_P_IPV6):
		err = nft_payload_offload_ip6(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct tcphdr, source):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct tcphdr, dest):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct udphdr, source):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct udphdr, dest):
		if (priv->len != sizeof(__be16))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.protonum) {
	case IPPROTO_TCP:
		err = nft_payload_offload_tcp(ctx, flow, priv);
		break;
	case IPPROTO_UDP:
		err = nft_payload_offload_udp(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload(struct nft_offload_ctx *ctx,
			       struct nft_flow_rule *flow,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	int err;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		err = nft_payload_offload_ll(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		err = nft_payload_offload_nh(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		err = nft_payload_offload_th(ctx, flow, priv);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	return err;
}

static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.offload	= nft_payload_offload,
};

const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.offload	= nft_payload_offload,
};

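/* Update a 16-bit Internet checksum after a payload change: subtract the
 * checksum of the old bytes (fsum), add the checksum of the new bytes (tsum)
 * and never emit 0, which UDP uses to mean "no checksum".
 */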
static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
	if (*sum == 0)
		*sum = CSUM_MANGLED_0;
}

static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
	struct udphdr *uh, _uh;

	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
	if (!uh)
		return false;

	return (__force bool)uh->check;
}

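/* Map the transport protocol to the offset of its checksum field relative to
 * the start of the packet; -1 means there is nothing to update (unsupported
 * protocol, or a UDP header whose checksum field is zero).
 */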
static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     unsigned int *l4csum_offset)
{
	switch (pkt->tprot) {
	case IPPROTO_TCP:
		*l4csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		if (!nft_payload_udp_checksum(skb, pkt->xt.thoff))
			return -1;
		/* Fall through. */
	case IPPROTO_UDPLITE:
		*l4csum_offset = offsetof(struct udphdr, check);
		break;
	case IPPROTO_ICMPV6:
		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
		break;
	default:
		return -1;
	}

	*l4csum_offset += pkt->xt.thoff;
	return 0;
}

static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     __wsum fsum, __wsum tsum)
{
	int l4csum_offset;
	__sum16 sum;

	/* If the layer 4 checksum offset cannot be determined, or the packet
	 * carries no layer 4 checksum at all, there is nothing to update.
	 */
	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
		return 0;

	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	/* For CHECKSUM_PARTIAL only the stored pseudo-header checksum needs
	 * the delta; otherwise patch the checksum itself and, for
	 * CHECKSUM_COMPLETE, keep skb->csum in sync as well.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		nft_csum_replace(&sum, fsum, tsum);
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
					      tsum);
		}
	} else {
		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
					  tsum));
	}

	if (!skb_make_writable(skb, l4csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
				 __wsum fsum, __wsum tsum, int csum_offset)
{
	__sum16 sum;

	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	nft_csum_replace(&sum, fsum, tsum);
	if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

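/* Write the source register into the packet at the requested offset and, if
 * configured, fix up the inet checksum at csum_offset and/or the layer 4
 * checksum via nft_payload_l4csum_update().
 */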
static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const u32 *src = &regs->data[priv->sreg];
	int offset, csum_offset;
	__wsum fsum, tsum;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!pkt->tprot_set)
			goto err;
		offset = pkt->xt.thoff;
		break;
	default:
		BUG();
	}

	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
	    (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		fsum = skb_checksum(skb, offset, priv->len, 0);
		tsum = csum_partial(src, priv->len, 0);

		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
			goto err;

		if (priv->csum_flags &&
		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
			goto err;
	}

	if (!skb_make_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		goto err;

	return;
err:
	regs->verdict.code = NFT_BREAK;
}

static int nft_payload_set_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
{
	struct nft_payload_set *priv = nft_expr_priv(expr);

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
	priv->sreg   = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);

	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
		priv->csum_type =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
	if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
		priv->csum_offset =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
		u32 flags;

		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
		if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
			return -EINVAL;

		priv->csum_flags = flags;
	}

	switch (priv->csum_type) {
	case NFT_PAYLOAD_CSUM_NONE:
	case NFT_PAYLOAD_CSUM_INET:
		break;
	default:
		return -EOPNOTSUPP;
	}

	return nft_validate_register_load(priv->sreg, priv->len);
}

static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
			 htonl(priv->csum_offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
};

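/* Select the expression ops: NFTA_PAYLOAD_SREG picks the payload-set variant;
 * loads of at most four power-of-two sized, naturally aligned bytes from the
 * network or transport header use nft_payload_fast_ops, anything else falls
 * back to the generic ops.
 */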
static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
		       const struct nlattr * const tb[])
{
	enum nft_payload_bases base;
	unsigned int offset, len;

	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
	    tb[NFTA_PAYLOAD_LEN] == NULL)
		return ERR_PTR(-EINVAL);

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
		if (tb[NFTA_PAYLOAD_DREG] != NULL)
			return ERR_PTR(-EINVAL);
		return &nft_payload_set_ops;
	}

	if (tb[NFTA_PAYLOAD_DREG] == NULL)
		return ERR_PTR(-EINVAL);

	offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	    base != NFT_PAYLOAD_LL_HEADER)
		return &nft_payload_fast_ops;
	else
		return &nft_payload_ops;
}

struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};