Wang Hai (1):
  tcp_comp: allow ignoring local tcp connections

Wang Yufen (6):
  tcp_comp: add tcp comp option to SYN and SYN-ACK
  tcp_comp: add init and cleanup hook for compression
  tcp_comp: only enable compression for given server ports
  tcp_comp: add stub proto ops for tcp compression socket
  tcp_comp: implement recvmsg for tcp compression
  tcp_comp: open configs for tcp compression

Wei Yongjun (3):
  tcp_comp: add Kconfig for tcp payload compression
  tcp_comp: add sysctl for enable/disable compression
  tcp_comp: implement sendmsg for tcp compression
 arch/arm64/configs/hulk_defconfig      |   5 +-
 arch/arm64/configs/openeuler_defconfig |   5 +-
 arch/x86/configs/hulk_defconfig        |   3 +
 arch/x86/configs/openeuler_defconfig   |   5 +-
 include/linux/tcp.h                    |   2 +-
 include/net/inet_sock.h                |   3 +-
 include/net/sock.h                     |   1 +
 include/net/tcp.h                      |  40 ++
 net/ipv4/Kconfig                       |  11 +
 net/ipv4/Makefile                      |   1 +
 net/ipv4/syncookies.c                  |   2 +
 net/ipv4/sysctl_net_ipv4.c             |  42 ++
 net/ipv4/tcp.c                         |   6 +
 net/ipv4/tcp_comp.c                    | 941 +++++++++++++++++++++++++++++++++
 net/ipv4/tcp_input.c                   |  44 +-
 net/ipv4/tcp_ipv4.c                    |   2 +
 net/ipv4/tcp_minisocks.c               |   3 +
 net/ipv4/tcp_output.c                  |  53 ++
 net/ipv6/syncookies.c                  |   2 +
 19 files changed, 1159 insertions(+), 12 deletions(-)
 create mode 100644 net/ipv4/tcp_comp.c
From: Wei Yongjun <weiyongjun1@huawei.com>
hulk inclusion
category: feature
bugzilla: NA
DTS: #659
CVE: NA
-------------------------------------------------
Add config item CONFIG_TCP_COMP for tcp payload compression.
This allows payload compression handling of the TCP protocol to be done in-kernel.
This patch only adds the CONFIG_TCP_COMP config option; the tcp compression capability itself is implemented in later patches.
Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
Signed-off-by: Wang Yufen <wangyufen@huawei.com>
---
 net/ipv4/Kconfig | 8 ++++++++
 1 file changed, 8 insertions(+)
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 2e12f84..883a638 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -754,3 +754,11 @@ config TCP_MD5SIG
 	  on the Internet.
 
 	  If unsure, say N.
+
+config TCP_COMP
+	bool "TCP: Transport Layer Compression support"
+	---help---
+	  Enable kernel payload compression support for TCP protocol. This allows
+	  payload compression handling of the TCP protocol to be done in-kernel.
+
+	  If unsure, say Y.
hulk inclusion
category: feature
bugzilla: NA
DTS: #659
CVE: NA
-------------------------------------------------
Add a new tcp COMP option to SYN and SYN-ACK packets when tcp COMP is enabled. A connection compresses its payload only when both sides support the option.

For KABI compatibility, the smc_ok bit in struct tcp_options_received is reused for the COMP option, so this config depends on !CONFIG_SMC.
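For reference, the option is carried in TCP's experimental option space: kind TCPOPT_EXP (254), length 4, then the 16-bit magic 0x7954. Below is a minimal sketch of the resulting on-wire layout, assuming only the constants this patch introduces (the helper itself is illustrative and not part of the series):

#include <stdint.h>

#define TCPOPT_EXP            254     /* experimental option kind (RFC 6994) */
#define TCPOLEN_EXP_COMP_BASE 4       /* total option length */
#define TCPOPT_COMP_MAGIC     0x7954  /* magic from this series */

/* Illustrative only: lay out the 4-byte COMP option exactly as
 * comp_options_write() does with a single htonl() store.
 */
static void write_comp_option(uint8_t *buf)
{
	buf[0] = TCPOPT_EXP;              /* kind */
	buf[1] = TCPOLEN_EXP_COMP_BASE;   /* length */
	buf[2] = TCPOPT_COMP_MAGIC >> 8;  /* magic, network byte order */
	buf[3] = TCPOPT_COMP_MAGIC & 0xff;
}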
Signed-off-by: Wang Yufen <wangyufen@huawei.com>
---
 include/linux/tcp.h      |  2 +-
 include/net/inet_sock.h  |  3 ++-
 include/net/sock.h       |  1 +
 include/net/tcp.h        | 12 +++++++++++
 net/ipv4/Kconfig         |  1 +
 net/ipv4/Makefile        |  1 +
 net/ipv4/syncookies.c    |  2 ++
 net/ipv4/tcp.c           |  4 ++++
 net/ipv4/tcp_comp.c      | 13 ++++++++++++
 net/ipv4/tcp_input.c     | 44 +++++++++++++++++++++++++++++++-------
 net/ipv4/tcp_minisocks.c |  3 +++
 net/ipv4/tcp_output.c    | 53 ++++++++++++++++++++++++++++++++++++++++++++++++
 net/ipv6/syncookies.c    |  2 ++
 13 files changed, 132 insertions(+), 9 deletions(-)
 create mode 100644 net/ipv4/tcp_comp.c
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 1192f1e..0ea3a31 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -110,7 +110,7 @@ static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
 {
 	rx_opt->tstamp_ok = rx_opt->sack_ok = 0;
 	rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
-#if IS_ENABLED(CONFIG_SMC)
+#if IS_ENABLED(CONFIG_SMC) || IS_ENABLED(CONFIG_TCP_COMP)
 	rx_opt->smc_ok = 0;
 #endif
 }
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index e8eef85..d5f3098 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -91,7 +91,8 @@ struct inet_request_sock {
 				ecn_ok     : 1,
 				acked      : 1,
 				no_srccheck: 1,
-				smc_ok     : 1;
+				smc_ok     : 1,
+				comp_ok    : 1;
 	u32                     ir_mark;
 	union {
 		struct ip_options_rcu __rcu	*ireq_opt;
diff --git a/include/net/sock.h b/include/net/sock.h
index 920094b..2c4e080 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -833,6 +833,7 @@ enum sock_flags {
 	SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
 	SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
 	SOCK_TXTIME,
+	SOCK_COMP,
 };
 
 #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a9a0db9..a619105 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -192,6 +192,7 @@
  */
 #define TCPOPT_FASTOPEN_MAGIC	0xF989
 #define TCPOPT_SMC_MAGIC	0xE2D4C3D9
+#define TCPOPT_COMP_MAGIC	0x7954
 
 /*
  * TCP option lengths
@@ -205,6 +206,7 @@
 #define TCPOLEN_FASTOPEN_BASE	2
 #define TCPOLEN_EXP_FASTOPEN_BASE	4
 #define TCPOLEN_EXP_SMC_BASE	6
+#define TCPOLEN_EXP_COMP_BASE	4
 
 /* But this is what stacks really send out. */
 #define TCPOLEN_TSTAMP_ALIGNED	12
@@ -2208,4 +2210,14 @@ void clean_acked_data_enable(struct inet_connection_sock *icsk,
 #endif
+#if IS_ENABLED(CONFIG_TCP_COMP)
+extern struct static_key_false tcp_have_comp;
+bool tcp_syn_comp_enabled(const struct tcp_sock *tp);
+#else
+static inline bool tcp_syn_comp_enabled(const struct tcp_sock *tp)
+{
+	return false;
+}
+#endif
+
 #endif	/* _TCP_H */
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 883a638..dc4a6d1 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -757,6 +757,7 @@ config TCP_MD5SIG
 
 config TCP_COMP
 	bool "TCP: Transport Layer Compression support"
+	depends on !SMC
 	---help---
 	  Enable kernel payload compression support for TCP protocol. This allows
 	  payload compression handling of the TCP protocol to be done in-kernel.
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 7446b98..cba816a 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -64,6 +64,7 @@ obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
 obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
 obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
 obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
+obj-$(CONFIG_TCP_COMP) += tcp_comp.o
 
 obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
 		      xfrm4_output.o xfrm4_protocol.o
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 1a06850..8b2fdfc 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -356,6 +356,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 	treq->tfo_listener = false;
 	if (IS_ENABLED(CONFIG_SMC))
 		ireq->smc_ok = 0;
+	if (IS_ENABLED(CONFIG_TCP_COMP))
+		ireq->comp_ok = 0;
 
 	ireq->ir_iif = inet_request_bound_dev_if(sk, skb);
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 769e1f6..14e4872 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -297,6 +297,10 @@ EXPORT_SYMBOL(tcp_have_smc);
 #endif
 
+#if IS_ENABLED(CONFIG_TCP_COMP)
+DEFINE_STATIC_KEY_FALSE(tcp_have_comp);
+#endif
+
 /*
  *	Current number of TCP sockets.
  */
diff --git a/net/ipv4/tcp_comp.c b/net/ipv4/tcp_comp.c
new file mode 100644
index 00000000..e2bf4fb
--- /dev/null
+++ b/net/ipv4/tcp_comp.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TCP compression support
+ *
+ * Copyright(c) 2021 Huawei Technologies Co., Ltd
+ */
+
+#include <net/tcp.h>
+
+bool tcp_syn_comp_enabled(const struct tcp_sock *tp)
+{
+	return true;
+}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index effba01..a058fb9 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3779,7 +3779,7 @@ static void tcp_parse_fastopen_option(int len, const unsigned char *cookie,
 	foc->exp = exp_opt;
 }
 
-static void smc_parse_options(const struct tcphdr *th,
+static bool smc_parse_options(const struct tcphdr *th,
 			      struct tcp_options_received *opt_rx,
 			      const unsigned char *ptr,
 			      int opsize)
@@ -3788,10 +3788,31 @@ static void smc_parse_options(const struct tcphdr *th,
 	if (static_branch_unlikely(&tcp_have_smc)) {
 		if (th->syn && !(opsize & 1) &&
 		    opsize >= TCPOLEN_EXP_SMC_BASE &&
-		    get_unaligned_be32(ptr) == TCPOPT_SMC_MAGIC)
+		    get_unaligned_be32(ptr) == TCPOPT_SMC_MAGIC) {
 			opt_rx->smc_ok = 1;
+			return true;
+		}
 	}
 #endif
+	return false;
+}
+
+static bool tcp_parse_comp_option(const struct tcphdr *th,
+				  struct tcp_options_received *opt_rx,
+				  const unsigned char *ptr,
+				  int opsize)
+{
+#if IS_ENABLED(CONFIG_TCP_COMP)
+	if (static_branch_unlikely(&tcp_have_comp)) {
+		if (th->syn && !(opsize & 1) &&
+		    opsize >= TCPOLEN_EXP_COMP_BASE &&
+		    get_unaligned_be16(ptr) == TCPOPT_COMP_MAGIC) {
+			opt_rx->smc_ok = 1;
+			return true;
+		}
+	}
+#endif
+	return false;
 }
 
 /* Look for tcp options. Normally only called on SYN and SYNACK packets.
@@ -3897,15 +3918,21 @@ void tcp_parse_options(const struct net *net,
 				 */
 				if (opsize >= TCPOLEN_EXP_FASTOPEN_BASE &&
 				    get_unaligned_be16(ptr) ==
-				    TCPOPT_FASTOPEN_MAGIC)
+				    TCPOPT_FASTOPEN_MAGIC) {
 					tcp_parse_fastopen_option(opsize -
 						TCPOLEN_EXP_FASTOPEN_BASE,
 						ptr + 2, th->syn, foc, true);
-				else
-					smc_parse_options(th, opt_rx, ptr,
-							  opsize);
-				break;
+					break;
+				}
+
+				if (smc_parse_options(th, opt_rx, ptr, opsize))
+					break;
+
+				if (tcp_parse_comp_option(th, opt_rx, ptr,
+							  opsize))
+					break;
+
+				break;
 			}
 			ptr += opsize-2;
 			length -= opsize;
@@ -6362,6 +6389,9 @@ static void tcp_openreq_init(struct request_sock *req,
 #if IS_ENABLED(CONFIG_SMC)
 	ireq->smc_ok = rx_opt->smc_ok;
 #endif
+#if IS_ENABLED(CONFIG_TCP_COMP)
+	ireq->comp_ok = rx_opt->smc_ok;
+#endif
 }
 
 struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index a20b393..2b3be8f 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -529,6 +529,9 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
 	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
 	newtp->rcv_wnd = req->rsk_rcv_wnd;
 	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
+#if IS_ENABLED(CONFIG_TCP_COMP)
+	newtp->rx_opt.smc_ok = ireq->comp_ok;
+#endif
 	if (newtp->rx_opt.wscale_ok) {
 		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
 		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index d55ee43..37da027 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -401,6 +401,7 @@ static inline bool tcp_urg_mode(const struct tcp_sock *tp)
 #define OPTION_WSCALE		(1 << 3)
 #define OPTION_FAST_OPEN_COOKIE	(1 << 8)
 #define OPTION_SMC		(1 << 9)
+#define OPTION_COMP		(1 << 10)
 
 static void smc_options_write(__be32 *ptr, u16 *options)
 {
@@ -417,6 +418,19 @@ static void smc_options_write(__be32 *ptr, u16 *options)
 #endif
 }
 
+static void comp_options_write(__be32 *ptr, u16 *options)
+{
+#if IS_ENABLED(CONFIG_TCP_COMP)
+	if (static_branch_unlikely(&tcp_have_comp)) {
+		if (unlikely(OPTION_COMP & *options)) {
+			*ptr++ = htonl((TCPOPT_EXP << 24) |
+				       (TCPOLEN_EXP_COMP_BASE << 16) |
+				       (TCPOPT_COMP_MAGIC));
+		}
+	}
+#endif
+}
+
 struct tcp_out_options {
 	u16 options;		/* bit field of OPTION_* */
 	u16 mss;		/* 0 to disable */
@@ -536,6 +550,8 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
 	}
 
 	smc_options_write(ptr, &options);
+
+	comp_options_write(ptr, &options);
 }
 
 static void smc_set_option(const struct tcp_sock *tp,
@@ -571,6 +587,39 @@ static void smc_set_option_cond(const struct tcp_sock *tp,
 #endif
 }
 
+static void comp_set_option(const struct tcp_sock *tp,
+			    struct tcp_out_options *opts,
+			    unsigned int *remaining)
+{
+#if IS_ENABLED(CONFIG_TCP_COMP)
+	if (static_branch_unlikely(&tcp_have_comp)) {
+		if (tcp_syn_comp_enabled(tp)) {
+			if (*remaining >= TCPOLEN_EXP_COMP_BASE) {
+				opts->options |= OPTION_COMP;
+				*remaining -= TCPOLEN_EXP_COMP_BASE;
+			}
+		}
+	}
+#endif
+}
+
+static void comp_set_option_cond(const struct tcp_sock *tp,
+				 const struct inet_request_sock *ireq,
+				 struct tcp_out_options *opts,
+				 unsigned int *remaining)
+{
+#if IS_ENABLED(CONFIG_TCP_COMP)
+	if (static_branch_unlikely(&tcp_have_comp)) {
+		if (tcp_syn_comp_enabled(tp) && ireq->comp_ok) {
+			if (*remaining >= TCPOLEN_EXP_COMP_BASE) {
+				opts->options |= OPTION_COMP;
+				*remaining -= TCPOLEN_EXP_COMP_BASE;
+			}
+		}
+	}
+#endif
+}
+
 /* Compute TCP options for SYN packets. This is not the final
  * network wire format yet.
  */
@@ -639,6 +688,8 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 
 	smc_set_option(tp, opts, &remaining);
 
+	comp_set_option(tp, opts, &remaining);
+
 	return MAX_TCP_OPTION_SPACE - remaining;
 }
 
@@ -704,6 +755,8 @@ static unsigned int tcp_synack_options(const struct sock *sk,
 
 	smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);
 
+	comp_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);
+
 	return MAX_TCP_OPTION_SPACE - remaining;
 }
 
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index ec61b67..62a6148 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -219,6 +219,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	treq->txhash = net_tx_rndhash();
 	if (IS_ENABLED(CONFIG_SMC))
 		ireq->smc_ok = 0;
+	if (IS_ENABLED(CONFIG_TCP_COMP))
+		ireq->comp_ok = 0;
 
 	/*
 	 * We need to lookup the dst_entry to get the correct window size.
hulk inclusion
category: feature
bugzilla: NA
DTS: #659
CVE: NA
-------------------------------------------------
When a tcp connection is established or closed, tcp compression needs to be initialized or cleaned up at the same time.

Add dummy init and cleanup hooks for tcp compression. They will be implemented in later patches.
Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
Signed-off-by: Wang Yufen <wangyufen@huawei.com>
---
 include/net/tcp.h   | 9 +++++++++
 net/ipv4/tcp.c      | 1 +
 net/ipv4/tcp_comp.c | 8 ++++++++
 net/ipv4/tcp_ipv4.c | 2 ++
 4 files changed, 20 insertions(+)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a619105..110ca98 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2213,11 +2213,20 @@ void clean_acked_data_enable(struct inet_connection_sock *icsk,
 #if IS_ENABLED(CONFIG_TCP_COMP)
 extern struct static_key_false tcp_have_comp;
 bool tcp_syn_comp_enabled(const struct tcp_sock *tp);
+void tcp_init_compression(struct sock *sk);
+void tcp_cleanup_compression(struct sock *sk);
 #else
 static inline bool tcp_syn_comp_enabled(const struct tcp_sock *tp)
 {
 	return false;
 }
+
+static inline void tcp_init_compression(struct sock *sk)
+{
+}
+
+static inline void tcp_cleanup_compression(struct sock *sk)
+{
+}
 #endif
 
 #endif	/* _TCP_H */
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 14e4872..e1a003d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -470,6 +470,7 @@ void tcp_init_transfer(struct sock *sk, int bpf_op)
 	tcp_init_metrics(sk);
 	tcp_call_bpf(sk, bpf_op, 0, NULL);
 	tcp_init_congestion_control(sk);
+	tcp_init_compression(sk);
 	tcp_init_buffer_space(sk);
 }
 
diff --git a/net/ipv4/tcp_comp.c b/net/ipv4/tcp_comp.c
index e2bf4fb..067d48b 100644
--- a/net/ipv4/tcp_comp.c
+++ b/net/ipv4/tcp_comp.c
@@ -11,3 +11,11 @@ bool tcp_syn_comp_enabled(const struct tcp_sock *tp)
 {
 	return true;
 }
+
+void tcp_init_compression(struct sock *sk)
+{
+}
+
+void tcp_cleanup_compression(struct sock *sk)
+{
+}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1cde142..313f54b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2074,6 +2074,8 @@ void tcp_v4_destroy_sock(struct sock *sk)
 
 	tcp_cleanup_congestion_control(sk);
 
+	tcp_cleanup_compression(sk);
+
 	tcp_cleanup_ulp(sk);
 
 	/* Cleanup up the write buffer. */
From: Wei Yongjun <weiyongjun1@huawei.com>
hulk inclusion
category: feature
bugzilla: NA
DTS: #659
CVE: NA
-------------------------------------------------
Add a sysctl interface for enabling/disabling tcp compression by port.
Example:
$ echo 4000 > /proc/sys/net/ipv4/tcp_compression_ports
will enable port 4000 for tcp compression

$ echo 4000,5000 > /proc/sys/net/ipv4/tcp_compression_ports
will enable both port 4000 and 5000 for tcp compression

$ echo > /proc/sys/net/ipv4/tcp_compression_ports
will disable tcp compression
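Internally the sysctl is backed by one bit per TCP port and is parsed with proc_do_large_bitmap(), so a port lookup is a single test_bit(). A minimal sketch of that idea follows (the DECLARE_BITMAP sizing and helper name here are illustrative; the patch allocates the array slightly differently):

#include <linux/bitmap.h>
#include <linux/bitops.h>

/* One bit per TCP port; proc_do_large_bitmap() accepts input such as
 * "4000,5000" or ranges like "4000-4010" and sets the matching bits.
 */
static DECLARE_BITMAP(comp_ports, 65536);

/* Illustrative lookup: is compression enabled for this server port? */
static bool comp_port_enabled(int port)
{
	return test_bit(port, comp_ports);
}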
Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
Signed-off-by: Wang Yufen <wangyufen@huawei.com>
---
 include/net/tcp.h          |  3 +++
 net/ipv4/sysctl_net_ipv4.c | 33 +++++++++++++++++++++++++++++++++
 net/ipv4/tcp_comp.c        |  4 ++++
 3 files changed, 40 insertions(+)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 110ca98..d3961e3 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2212,6 +2212,9 @@ void clean_acked_data_enable(struct inet_connection_sock *icsk,
 
 #if IS_ENABLED(CONFIG_TCP_COMP)
 extern struct static_key_false tcp_have_comp;
+
+extern unsigned long *sysctl_tcp_compression_ports;
+
 bool tcp_syn_comp_enabled(const struct tcp_sock *tp);
 void tcp_init_compression(struct sock *sk);
 void tcp_cleanup_compression(struct sock *sk);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index fdd166e..8317a2d 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -438,6 +438,30 @@ static int proc_fib_multipath_hash_policy(struct ctl_table *table, int write,
 }
 #endif
 
+#if IS_ENABLED(CONFIG_TCP_COMP)
+static int proc_tcp_compression_ports(struct ctl_table *table, int write,
+				      void __user *buffer, size_t *lenp,
+				      loff_t *ppos)
+{
+	unsigned long *bitmap = *(unsigned long **)table->data;
+	unsigned long bitmap_len = table->maxlen;
+	int ret;
+
+	ret = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
+	if (write && ret == 0) {
+		if (bitmap_empty(bitmap, bitmap_len)) {
+			if (static_key_enabled(&tcp_have_comp))
+				static_branch_disable(&tcp_have_comp);
+		} else {
+			if (!static_key_enabled(&tcp_have_comp))
+				static_branch_enable(&tcp_have_comp);
+		}
+	}
+
+	return ret;
+}
+#endif
+
 static struct ctl_table ipv4_table[] = {
 	{
 		.procname	= "tcp_max_orphans",
@@ -560,6 +584,15 @@ static int proc_fib_multipath_hash_policy(struct ctl_table *table, int write,
 		.mode		= 0644,
 		.proc_handler	= proc_doulongvec_minmax,
 	},
+#if IS_ENABLED(CONFIG_TCP_COMP)
+	{
+		.procname	= "tcp_compression_ports",
+		.data		= &sysctl_tcp_compression_ports,
+		.maxlen		= 65536,
+		.mode		= 0644,
+		.proc_handler	= proc_tcp_compression_ports,
+	},
+#endif
 	{ }
 };
diff --git a/net/ipv4/tcp_comp.c b/net/ipv4/tcp_comp.c
index 067d48b..3493255 100644
--- a/net/ipv4/tcp_comp.c
+++ b/net/ipv4/tcp_comp.c
@@ -7,6 +7,10 @@
 
 #include <net/tcp.h>
 
+static unsigned long tcp_compression_ports[65536 / 8];
+
+unsigned long *sysctl_tcp_compression_ports = tcp_compression_ports;
+
 bool tcp_syn_comp_enabled(const struct tcp_sock *tp)
 {
 	return true;
hulk inclusion
category: feature
bugzilla: NA
DTS: #659
CVE: NA
-------------------------------------------------
Only enable compression for given server ports: when sending a SYN, check the dport; when sending a SYN-ACK, check the sport.
Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
Signed-off-by: Wang Yufen <wangyufen@huawei.com>
---
 include/net/tcp.h     |  2 +-
 net/ipv4/tcp_comp.c   | 18 ++++++++++++++++--
 net/ipv4/tcp_output.c | 12 ++++++------
 3 files changed, 23 insertions(+), 9 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index d3961e3..41f7ebb 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2215,7 +2215,7 @@ void clean_acked_data_enable(struct inet_connection_sock *icsk,
 
 extern unsigned long *sysctl_tcp_compression_ports;
 
-bool tcp_syn_comp_enabled(const struct tcp_sock *tp);
+bool tcp_syn_comp_enabled(const struct sock *sk, bool active);
 void tcp_init_compression(struct sock *sk);
 void tcp_cleanup_compression(struct sock *sk);
 #else
diff --git a/net/ipv4/tcp_comp.c b/net/ipv4/tcp_comp.c
index 3493255..a71f23f 100644
--- a/net/ipv4/tcp_comp.c
+++ b/net/ipv4/tcp_comp.c
@@ -11,13 +11,27 @@
 
 unsigned long *sysctl_tcp_compression_ports = tcp_compression_ports;
 
-bool tcp_syn_comp_enabled(const struct tcp_sock *tp)
+bool tcp_syn_comp_enabled(const struct sock *sk, bool active)
 {
-	return true;
+	struct inet_sock *inet = inet_sk(sk);
+	int port;
+
+	if (active)
+		port = ntohs(inet->inet_dport);
+	else
+		port = ntohs(inet->inet_sport);
+
+	return test_bit(port, sysctl_tcp_compression_ports);
 }
 
 void tcp_init_compression(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (!tp->rx_opt.smc_ok)
+		return;
+
+	sock_set_flag(sk, SOCK_COMP);
 }
 
 void tcp_cleanup_compression(struct sock *sk)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 37da027..9453a98 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -587,13 +587,13 @@ static void smc_set_option_cond(const struct tcp_sock *tp,
 #endif
 }
 
-static void comp_set_option(const struct tcp_sock *tp,
+static void comp_set_option(const struct sock *sk,
 			    struct tcp_out_options *opts,
 			    unsigned int *remaining)
 {
 #if IS_ENABLED(CONFIG_TCP_COMP)
 	if (static_branch_unlikely(&tcp_have_comp)) {
-		if (tcp_syn_comp_enabled(tp)) {
+		if (tcp_syn_comp_enabled(sk, true)) {
 			if (*remaining >= TCPOLEN_EXP_COMP_BASE) {
 				opts->options |= OPTION_COMP;
 				*remaining -= TCPOLEN_EXP_COMP_BASE;
@@ -603,14 +603,14 @@ static void comp_set_option(const struct tcp_sock *tp,
 #endif
 }
 
-static void comp_set_option_cond(const struct tcp_sock *tp,
+static void comp_set_option_cond(const struct sock *sk,
 				 const struct inet_request_sock *ireq,
 				 struct tcp_out_options *opts,
 				 unsigned int *remaining)
 {
 #if IS_ENABLED(CONFIG_TCP_COMP)
 	if (static_branch_unlikely(&tcp_have_comp)) {
-		if (tcp_syn_comp_enabled(tp) && ireq->comp_ok) {
+		if (tcp_syn_comp_enabled(sk, false) && ireq->comp_ok) {
 			if (*remaining >= TCPOLEN_EXP_COMP_BASE) {
 				opts->options |= OPTION_COMP;
 				*remaining -= TCPOLEN_EXP_COMP_BASE;
@@ -688,7 +688,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 
 	smc_set_option(tp, opts, &remaining);
 
-	comp_set_option(tp, opts, &remaining);
+	comp_set_option(sk, opts, &remaining);
 
 	return MAX_TCP_OPTION_SPACE - remaining;
 }
@@ -755,7 +755,7 @@ static unsigned int tcp_synack_options(const struct sock *sk,
 
 	smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);
 
-	comp_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);
+	comp_set_option_cond(sk, ireq, opts, &remaining);
 
 	return MAX_TCP_OPTION_SPACE - remaining;
 }
From: Wang Hai <wanghai38@huawei.com>
hulk inclusion
category: feature
bugzilla: NA
DTS: #659
CVE: NA
-------------------------------------------------
Tcp compression is used to reduce the amount of data transmitted between machines, which increases the effective transmission capacity.

A local tcp connection transfers data within a single machine, so there is no benefit in compressing it. Ignore local connections by default.
Enable by sysctl:
echo 1 > /proc/sys/net/ipv4/tcp_compression_local
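For illustration, a sketch of the skip logic this patch adds (ipv4_is_loopback() is the existing helper from <linux/in.h>; the wrapper name here is hypothetical):

#include <linux/in.h>
#include <linux/types.h>

/* Hypothetical wrapper mirroring tcp_comp_enabled() in the diff below:
 * with sysctl_tcp_compression_local == 0, a connection whose endpoints
 * share an address, or whose peer is in 127.0.0.0/8, never compresses.
 */
static bool comp_allowed(__be32 saddr, __be32 daddr, bool allow_local)
{
	if (!allow_local && (saddr == daddr || ipv4_is_loopback(daddr)))
		return false;	/* single-machine transfer: skip compression */

	return true;		/* still subject to the port bitmap check */
}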
Signed-off-by: Wang Hai <wanghai38@huawei.com>
Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
Signed-off-by: Wang Yufen <wangyufen@huawei.com>
---
 include/net/tcp.h          | 12 +++++++++++-
 net/ipv4/sysctl_net_ipv4.c |  9 +++++++++
 net/ipv4/tcp_comp.c        | 31 ++++++++++++++++++++++++-------
 net/ipv4/tcp_output.c      |  4 ++--
 4 files changed, 46 insertions(+), 10 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 41f7ebb..dd4402a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2214,8 +2214,11 @@ void clean_acked_data_enable(struct inet_connection_sock *icsk,
 extern struct static_key_false tcp_have_comp;
 
 extern unsigned long *sysctl_tcp_compression_ports;
+extern int sysctl_tcp_compression_local;
 
-bool tcp_syn_comp_enabled(const struct sock *sk, bool active);
+bool tcp_syn_comp_enabled(const struct sock *sk);
+bool tcp_synack_comp_enabled(const struct sock *sk,
+			     const struct inet_request_sock *ireq);
 void tcp_init_compression(struct sock *sk);
 void tcp_cleanup_compression(struct sock *sk);
 #else
@@ -2223,6 +2226,13 @@ static inline bool tcp_syn_comp_enabled(const struct tcp_sock *tp)
 {
 	return false;
 }
+
+static inline bool tcp_synack_comp_enabled(const struct sock *sk,
+					   const struct inet_request_sock *ireq)
+{
+	return false;
+}
+
 static inline void tcp_init_compression(struct sock *sk)
 {
 }
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 8317a2d..840958b6 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -592,6 +592,15 @@ static int proc_tcp_compression_ports(struct ctl_table *table, int write,
 		.mode		= 0644,
 		.proc_handler	= proc_tcp_compression_ports,
 	},
+	{
+		.procname	= "tcp_compression_local",
+		.data		= &sysctl_tcp_compression_local,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
 #endif
 	{ }
 };
diff --git a/net/ipv4/tcp_comp.c b/net/ipv4/tcp_comp.c
index a71f23f..ec018c9 100644
--- a/net/ipv4/tcp_comp.c
+++ b/net/ipv4/tcp_comp.c
@@ -10,18 +10,35 @@ static unsigned long tcp_compression_ports[65536 / 8];
 
 unsigned long *sysctl_tcp_compression_ports = tcp_compression_ports;
+int sysctl_tcp_compression_local __read_mostly;
 
-bool tcp_syn_comp_enabled(const struct sock *sk, bool active)
+static bool tcp_comp_enabled(__be32 saddr, __be32 daddr, int port)
+{
+	if (!sysctl_tcp_compression_local &&
+	    (saddr == daddr || ipv4_is_loopback(daddr)))
+		return false;
+
+	return test_bit(port, sysctl_tcp_compression_ports);
+}
+
+bool tcp_syn_comp_enabled(const struct sock *sk)
 {
 	struct inet_sock *inet = inet_sk(sk);
-	int port;
 
-	if (active)
-		port = ntohs(inet->inet_dport);
-	else
-		port = ntohs(inet->inet_sport);
+	return tcp_comp_enabled(inet->inet_saddr, inet->inet_daddr,
+				ntohs(inet->inet_dport));
+}
 
-	return test_bit(port, sysctl_tcp_compression_ports);
+bool tcp_synack_comp_enabled(const struct sock *sk,
+			     const struct inet_request_sock *ireq)
+{
+	struct inet_sock *inet = inet_sk(sk);
+
+	if (!ireq->comp_ok)
+		return false;
+
+	return tcp_comp_enabled(ireq->ir_loc_addr, ireq->ir_rmt_addr,
+				ntohs(inet->inet_sport));
 }
 
 void tcp_init_compression(struct sock *sk)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 9453a98..97b9d67 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -593,7 +593,7 @@ static void comp_set_option(const struct sock *sk,
 {
 #if IS_ENABLED(CONFIG_TCP_COMP)
 	if (static_branch_unlikely(&tcp_have_comp)) {
-		if (tcp_syn_comp_enabled(sk, true)) {
+		if (tcp_syn_comp_enabled(sk)) {
 			if (*remaining >= TCPOLEN_EXP_COMP_BASE) {
 				opts->options |= OPTION_COMP;
 				*remaining -= TCPOLEN_EXP_COMP_BASE;
@@ -610,7 +610,7 @@ static void comp_set_option_cond(const struct sock *sk,
 {
 #if IS_ENABLED(CONFIG_TCP_COMP)
 	if (static_branch_unlikely(&tcp_have_comp)) {
-		if (tcp_syn_comp_enabled(sk, false) && ireq->comp_ok) {
+		if (tcp_synack_comp_enabled(sk, ireq)) {
 			if (*remaining >= TCPOLEN_EXP_COMP_BASE) {
 				opts->options |= OPTION_COMP;
 				*remaining -= TCPOLEN_EXP_COMP_BASE;
hulk inclusion
category: feature
bugzilla: NA
DTS: #659
CVE: NA
-------------------------------------------------
Add stub proto ops for tcp compression socket.
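The approach, similar to what kTLS uses, is to clone the global tcp_prot, patch a few callbacks, and swap the socket's sk_prot to the clone while stashing the original in the ULP data slot. A condensed sketch of that pattern (names follow the diff below; allocation and error handling omitted):

/* Condensed sketch of the override pattern; see the full diff below.
 * The stub callbacks delegate to the saved original proto.
 */
static struct proto tcp_prot_override;

static void comp_override_proto(struct sock *sk, struct tcp_comp_context *ctx)
{
	ctx->sk_proto = sk->sk_prot;			/* remember real tcp_prot */
	WRITE_ONCE(sk->sk_prot, &tcp_prot_override);	/* swap in the clone */
}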
Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
Signed-off-by: Wang Yufen <wangyufen@huawei.com>
---
 include/net/tcp.h   |  6 +++++
 net/ipv4/tcp.c      |  1 +
 net/ipv4/tcp_comp.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 73 insertions(+)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index dd4402a..aedfeaf 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2221,6 +2221,7 @@ bool tcp_synack_comp_enabled(const struct sock *sk,
 			     const struct inet_request_sock *ireq);
 void tcp_init_compression(struct sock *sk);
 void tcp_cleanup_compression(struct sock *sk);
+int tcp_comp_init(void);
 #else
 static inline bool tcp_syn_comp_enabled(const struct tcp_sock *tp)
 {
@@ -2240,6 +2241,11 @@ static inline void tcp_init_compression(struct sock *sk)
 static inline void tcp_cleanup_compression(struct sock *sk)
 {
 }
+
+static inline int tcp_comp_init(void)
+{
+	return 0;
+}
 #endif
 
 #endif	/* _TCP_H */
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e1a003d..a13c9cd 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3959,4 +3959,5 @@ void __init tcp_init(void)
 	tcp_metrics_init();
 	BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
 	tcp_tasklet_init();
+	tcp_comp_init();
 }
diff --git a/net/ipv4/tcp_comp.c b/net/ipv4/tcp_comp.c
index ec018c9..b7d3ac2 100644
--- a/net/ipv4/tcp_comp.c
+++ b/net/ipv4/tcp_comp.c
@@ -12,6 +12,13 @@ unsigned long *sysctl_tcp_compression_ports = tcp_compression_ports;
 int sysctl_tcp_compression_local __read_mostly;
 
+static struct proto tcp_prot_override;
+
+struct tcp_comp_context {
+	struct proto *sk_proto;
+	struct rcu_head rcu;
+};
+
 static bool tcp_comp_enabled(__be32 saddr, __be32 daddr, int port)
 {
 	if (!sysctl_tcp_compression_local &&
@@ -41,16 +48,75 @@ bool tcp_synack_comp_enabled(const struct sock *sk,
 				ntohs(inet->inet_sport));
 }
 
+static struct tcp_comp_context *comp_get_ctx(const struct sock *sk)
+{
+	struct inet_connection_sock *icsk = inet_csk(sk);
+
+	return (__force void *)icsk->icsk_ulp_data;
+}
+
+static int tcp_comp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+{
+	struct tcp_comp_context *ctx = comp_get_ctx(sk);
+
+	return ctx->sk_proto->sendmsg(sk, msg, size);
+}
+
+static int tcp_comp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+			    int nonblock, int flags, int *addr_len)
+{
+	struct tcp_comp_context *ctx = comp_get_ctx(sk);
+
+	return ctx->sk_proto->recvmsg(sk, msg, len, nonblock, flags, addr_len);
+}
+
 void tcp_init_compression(struct sock *sk)
 {
+	struct inet_connection_sock *icsk = inet_csk(sk);
+	struct tcp_comp_context *ctx = NULL;
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	if (!tp->rx_opt.smc_ok)
 		return;
 
+	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
+	if (!ctx)
+		return;
+
+	ctx->sk_proto = sk->sk_prot;
+	WRITE_ONCE(sk->sk_prot, &tcp_prot_override);
+
+	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
+
 	sock_set_flag(sk, SOCK_COMP);
 }
 
+static void tcp_comp_context_free(struct rcu_head *head)
+{
+	struct tcp_comp_context *ctx;
+
+	ctx = container_of(head, struct tcp_comp_context, rcu);
+
+	kfree(ctx);
+}
+
 void tcp_cleanup_compression(struct sock *sk)
 {
+	struct inet_connection_sock *icsk = inet_csk(sk);
+	struct tcp_comp_context *ctx = comp_get_ctx(sk);
+
+	if (!ctx || !sock_flag(sk, SOCK_COMP))
+		return;
+
+	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
+	call_rcu(&ctx->rcu, tcp_comp_context_free);
+}
+
+int tcp_comp_init(void)
+{
+	tcp_prot_override = tcp_prot;
+	tcp_prot_override.sendmsg = tcp_comp_sendmsg;
+	tcp_prot_override.recvmsg = tcp_comp_recvmsg;
+
+	return 0;
 }
From: Wei Yongjun <weiyongjun1@huawei.com>
hulk inclusion
category: feature
bugzilla: NA
DTS: #659
CVE: NA
-------------------------------------------------
This patch implements software-level compression for sending tcp messages. All of the TCP payload is compressed before xmit.
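The kernel side uses the zstd streaming API (ZSTD_initCStream/ZSTD_compressStream/ZSTD_flushStream from <linux/zstd.h>). As a userspace analogue, the per-chunk compress-then-flush flow can be reproduced with upstream libzstd; the sketch below is illustrative only (build with: cc tx_demo.c -lzstd):

#include <stdio.h>
#include <string.h>
#include <zstd.h>

int main(void)
{
	const char msg[] = "payload payload payload payload";
	char out[256];
	ZSTD_CStream *cs = ZSTD_createCStream();
	ZSTD_inBuffer in = { msg, sizeof(msg), 0 };
	ZSTD_outBuffer ob = { out, sizeof(out), 0 };
	size_t ret;

	ZSTD_initCStream(cs, 1);	/* level 1, as in the patch */

	/* One compress + flush per sendmsg chunk, mirroring
	 * tcp_comp_push(): each chunk is flushed so the receiver
	 * can decompress it incrementally as it arrives.
	 */
	ret = ZSTD_compressStream(cs, &ob, &in);
	if (!ZSTD_isError(ret))
		ret = ZSTD_flushStream(cs, &ob);
	if (ZSTD_isError(ret))
		return 1;

	printf("%zu plaintext bytes -> %zu compressed bytes\n",
	       sizeof(msg), ob.pos);
	ZSTD_freeCStream(cs);
	return 0;
}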
Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
Signed-off-by: Wang Yufen <wangyufen@huawei.com>
---
 net/ipv4/Kconfig    |   2 +-
 net/ipv4/tcp_comp.c | 448 +++++++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 446 insertions(+), 4 deletions(-)
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index dc4a6d1..96f2311 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -757,7 +757,7 @@ config TCP_MD5SIG
 
 config TCP_COMP
 	bool "TCP: Transport Layer Compression support"
-	depends on !SMC
+	depends on !SMC && ZSTD_COMPRESS=y
 	---help---
 	  Enable kernel payload compression support for TCP protocol. This allows
 	  payload compression handling of the TCP protocol to be done in-kernel.
diff --git a/net/ipv4/tcp_comp.c b/net/ipv4/tcp_comp.c
index b7d3ac2..5fe8acd 100644
--- a/net/ipv4/tcp_comp.c
+++ b/net/ipv4/tcp_comp.c
@@ -6,6 +6,14 @@
  */
 
 #include <net/tcp.h>
+#include <linux/zstd.h>
+
+#define TCP_COMP_MAX_PADDING	64
+#define TCP_COMP_SCRATCH_SIZE	65400
+#define TCP_COMP_MAX_CSIZE	(TCP_COMP_SCRATCH_SIZE + TCP_COMP_MAX_PADDING)
+
+#define TCP_COMP_SEND_PENDING	1
+#define ZSTD_COMP_DEFAULT_LEVEL	1
 
 static unsigned long tcp_compression_ports[65536 / 8];
 
@@ -14,11 +22,42 @@
 static struct proto tcp_prot_override;
 
+struct tcp_comp_context_tx {
+	ZSTD_CStream *cstream;
+	void *cworkspace;
+	void *plaintext_data;
+	void *compressed_data;
+
+	struct scatterlist sg_data[MAX_SKB_FRAGS];
+	unsigned int sg_size;
+	int sg_num;
+
+	struct scatterlist *partially_send;
+	bool in_tcp_sendpages;
+};
+
 struct tcp_comp_context {
-	struct proto *sk_proto;
 	struct rcu_head rcu;
+
+	struct proto *sk_proto;
+	void (*sk_write_space)(struct sock *sk);
+
+	struct tcp_comp_context_tx tx;
+
+	unsigned long flags;
 };
 
+static bool tcp_comp_is_write_pending(struct tcp_comp_context *ctx)
+{
+	return test_bit(TCP_COMP_SEND_PENDING, &ctx->flags);
+}
+
+static void tcp_comp_err_abort(struct sock *sk, int err)
+{
+	sk->sk_err = err;
+	sk->sk_error_report(sk);
+}
+
 static bool tcp_comp_enabled(__be32 saddr, __be32 daddr, int port)
 {
 	if (!sysctl_tcp_compression_local &&
@@ -55,11 +94,359 @@ static struct tcp_comp_context *comp_get_ctx(const struct sock *sk)
 	return (__force void *)icsk->icsk_ulp_data;
 }
 
-static int tcp_comp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+static int tcp_comp_tx_context_init(struct tcp_comp_context *ctx)
+{
+	ZSTD_parameters params;
+	int csize;
+
+	params = ZSTD_getParams(ZSTD_COMP_DEFAULT_LEVEL, PAGE_SIZE, 0);
+	csize = ZSTD_CStreamWorkspaceBound(params.cParams);
+	if (csize <= 0)
+		return -EINVAL;
+
+	ctx->tx.cworkspace = kmalloc(csize, GFP_KERNEL);
+	if (!ctx->tx.cworkspace)
+		return -ENOMEM;
+
+	ctx->tx.cstream = ZSTD_initCStream(params, 0, ctx->tx.cworkspace,
+					   csize);
+	if (!ctx->tx.cstream)
+		goto err_cstream;
+
+	ctx->tx.plaintext_data = kvmalloc(TCP_COMP_SCRATCH_SIZE, GFP_KERNEL);
+	if (!ctx->tx.plaintext_data)
+		goto err_cstream;
+
+	ctx->tx.compressed_data = kvmalloc(TCP_COMP_MAX_CSIZE, GFP_KERNEL);
+	if (!ctx->tx.compressed_data)
+		goto err_compressed;
+
+	return 0;
+
+err_compressed:
+	kvfree(ctx->tx.plaintext_data);
+	ctx->tx.plaintext_data = NULL;
+err_cstream:
+	kfree(ctx->tx.cworkspace);
+	ctx->tx.cworkspace = NULL;
+
+	return -ENOMEM;
+}
+
+static void *tcp_comp_get_tx_stream(struct sock *sk)
+{
+	struct tcp_comp_context *ctx = comp_get_ctx(sk);
+
+	if (!ctx->tx.plaintext_data)
+		tcp_comp_tx_context_init(ctx);
+
+	return ctx->tx.plaintext_data;
+}
+
+static int alloc_compressed_sg(struct sock *sk, int len)
+{
+	struct tcp_comp_context *ctx = comp_get_ctx(sk);
+	int rc = 0;
+
+	rc = sk_alloc_sg(sk, len, ctx->tx.sg_data, 0,
+			 &ctx->tx.sg_num, &ctx->tx.sg_size, 0);
+	if (rc == -ENOSPC)
+		ctx->tx.sg_num = ARRAY_SIZE(ctx->tx.sg_data);
+
+	return rc;
+}
+
+static int memcopy_from_iter(struct sock *sk, struct iov_iter *from, int copy)
+{
+	void *dest;
+	int rc;
+
+	dest = tcp_comp_get_tx_stream(sk);
+	if (!dest)
+		return -ENOSPC;
+
+	if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
+		rc = copy_from_iter_nocache(dest, copy, from);
+	else
+		rc = copy_from_iter(dest, copy, from);
+
+	if (rc != copy)
+		rc = -EFAULT;
+
+	return rc;
+}
+
+static int memcopy_to_sg(struct sock *sk, int bytes)
+{
+	struct tcp_comp_context *ctx = comp_get_ctx(sk);
+	struct scatterlist *sg = ctx->tx.sg_data;
+	char *from, *to;
+	int copy;
+
+	from = ctx->tx.compressed_data;
+	while (bytes && sg) {
+		to = sg_virt(sg);
+		copy = min_t(int, sg->length, bytes);
+		memcpy(to, from, copy);
+		bytes -= copy;
+		from += copy;
+		sg = sg_next(sg);
+	}
+
+	return bytes;
+}
+
+static void trim_sg(struct sock *sk, int target_size)
 {
 	struct tcp_comp_context *ctx = comp_get_ctx(sk);
+	struct scatterlist *sg = ctx->tx.sg_data;
+	int trim = ctx->tx.sg_size - target_size;
+	int i = ctx->tx.sg_num - 1;
+
+	if (trim <= 0) {
+		WARN_ON_ONCE(trim < 0);
+		return;
+	}
+
+	ctx->tx.sg_size = target_size;
+	while (trim >= sg[i].length) {
+		trim -= sg[i].length;
+		sk_mem_uncharge(sk, sg[i].length);
+		put_page(sg_page(&sg[i]));
+		i--;
 
-	return ctx->sk_proto->sendmsg(sk, msg, size);
+		if (i < 0)
+			goto out;
+	}
+
+	sg[i].length -= trim;
+	sk_mem_uncharge(sk, trim);
+
+out:
+	ctx->tx.sg_num = i + 1;
+	sg_mark_end(ctx->tx.sg_data + ctx->tx.sg_num - 1);
+}
+
+static int tcp_comp_compress_to_sg(struct sock *sk, int bytes)
+{
+	struct tcp_comp_context *ctx = comp_get_ctx(sk);
+	ZSTD_outBuffer outbuf;
+	ZSTD_inBuffer inbuf;
+	size_t ret;
+
+	inbuf.src = ctx->tx.plaintext_data;
+	outbuf.dst = ctx->tx.compressed_data;
+	inbuf.size = bytes;
+	outbuf.size = TCP_COMP_MAX_CSIZE;
+	inbuf.pos = 0;
+	outbuf.pos = 0;
+
+	ret = ZSTD_compressStream(ctx->tx.cstream, &outbuf, &inbuf);
+	if (ZSTD_isError(ret))
+		return -EIO;
+
+	ret = ZSTD_flushStream(ctx->tx.cstream, &outbuf);
+	if (ZSTD_isError(ret))
+		return -EIO;
+
+	if (inbuf.pos != inbuf.size)
+		return -EIO;
+
+	if (memcopy_to_sg(sk, outbuf.pos))
+		return -EIO;
+
+	trim_sg(sk, outbuf.pos);
+
+	return 0;
+}
+
+static int tcp_comp_push_sg(struct sock *sk, struct scatterlist *sg, int flags)
+{
+	struct tcp_comp_context *ctx = comp_get_ctx(sk);
+	int ret, offset;
+	struct page *p;
+	size_t size;
+
+	ctx->tx.in_tcp_sendpages = true;
+	while (sg) {
+		offset = sg->offset;
+		size = sg->length;
+		p = sg_page(sg);
+retry:
+		ret = do_tcp_sendpages(sk, p, offset, size, flags);
+		if (ret != size) {
+			if (ret > 0) {
+				sk_mem_uncharge(sk, ret);
+				sg->offset += ret;
+				sg->length -= ret;
+				size -= ret;
+				offset += ret;
+				goto retry;
+			}
+			ctx->tx.partially_send = (void *)sg;
+			ctx->tx.in_tcp_sendpages = false;
+			return ret;
+		}
+
+		sk_mem_uncharge(sk, ret);
+		put_page(p);
+		sg = sg_next(sg);
+	}
+
+	clear_bit(TCP_COMP_SEND_PENDING, &ctx->flags);
+	ctx->tx.in_tcp_sendpages = false;
+	ctx->tx.sg_size = 0;
+	ctx->tx.sg_num = 0;
+
+	return 0;
+}
+
+static int tcp_comp_push(struct sock *sk, int bytes, int flags)
+{
+	struct tcp_comp_context *ctx = comp_get_ctx(sk);
+	int ret;
+
+	ret = tcp_comp_compress_to_sg(sk, bytes);
+	if (ret < 0) {
+		pr_debug("%s: failed to compress sg\n", __func__);
+		return ret;
+	}
+
+	set_bit(TCP_COMP_SEND_PENDING, &ctx->flags);
+
+	ret = tcp_comp_push_sg(sk, ctx->tx.sg_data, flags);
+	if (ret) {
+		pr_debug("%s: failed to tcp_comp_push_sg\n", __func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int wait_on_pending_writer(struct sock *sk, long *timeo)
+{
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+	int ret = 0;
+
+	add_wait_queue(sk_sleep(sk), &wait);
+	while (1) {
+		if (!*timeo) {
+			ret = -EAGAIN;
+			break;
+		}
+
+		if (signal_pending(current)) {
+			ret = sock_intr_errno(*timeo);
+			break;
+		}
+
+		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
+			break;
+	}
+	remove_wait_queue(sk_sleep(sk), &wait);
+
+	return ret;
+}
+
+static int tcp_comp_push_pending_sg(struct sock *sk, int flags)
+{
+	struct tcp_comp_context *ctx = comp_get_ctx(sk);
+	struct scatterlist *sg;
+
+	if (!ctx->tx.partially_send)
+		return 0;
+
+	sg = ctx->tx.partially_send;
+	ctx->tx.partially_send = NULL;
+
+	return tcp_comp_push_sg(sk, sg, flags);
+}
+
+static int tcp_comp_complete_pending_work(struct sock *sk, int flags,
+					  long *timeo)
+{
+	struct tcp_comp_context *ctx = comp_get_ctx(sk);
+	int ret = 0;
+
+	if (unlikely(sk->sk_write_pending))
+		ret = wait_on_pending_writer(sk, timeo);
+
+	if (!ret && tcp_comp_is_write_pending(ctx))
+		ret = tcp_comp_push_pending_sg(sk, flags);
+
+	return ret;
+}
+
+static int tcp_comp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+{
+	struct tcp_comp_context *ctx = comp_get_ctx(sk);
+	int copied = 0, err = 0;
+	size_t try_to_copy;
+	int required_size;
+	long timeo;
+
+	lock_sock(sk);
+
+	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+
+	err = tcp_comp_complete_pending_work(sk, msg->msg_flags, &timeo);
+	if (err)
+		goto out_err;
+
+	while (msg_data_left(msg)) {
+		if (sk->sk_err) {
+			err = -sk->sk_err;
+			goto out_err;
+		}
+
+		try_to_copy = msg_data_left(msg);
+		if (try_to_copy > TCP_COMP_SCRATCH_SIZE)
+			try_to_copy = TCP_COMP_SCRATCH_SIZE;
+		required_size = try_to_copy + TCP_COMP_MAX_PADDING;
+
+		if (!sk_stream_memory_free(sk))
+			goto wait_for_sndbuf;
+
+alloc_compressed:
+		err = alloc_compressed_sg(sk, required_size);
+		if (err) {
+			if (err != -ENOSPC)
+				goto wait_for_memory;
+			goto out_err;
+		}
+
+		err = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
+		if (err < 0)
+			goto out_err;
+
+		copied += try_to_copy;
+
+		err = tcp_comp_push(sk, try_to_copy, msg->msg_flags);
+		if (err < 0) {
+			if (err == -ENOMEM)
+				goto wait_for_memory;
+			if (err != -EAGAIN)
+				tcp_comp_err_abort(sk, EBADMSG);
+			goto out_err;
+		}
+
+		continue;
+wait_for_sndbuf:
+		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+wait_for_memory:
+		err = sk_stream_wait_memory(sk, &timeo);
+		if (err)
+			goto out_err;
+		if (ctx->tx.sg_size < required_size)
+			goto alloc_compressed;
+	}
+
+out_err:
+	err = sk_stream_error(sk, msg->msg_flags, err);
+
+	release_sock(sk);
+
+	return copied ? copied : err;
 }
 
 static int tcp_comp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
@@ -70,6 +457,30 @@ static int tcp_comp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 	return ctx->sk_proto->recvmsg(sk, msg, len, nonblock, flags, addr_len);
 }
 
+static void tcp_comp_write_space(struct sock *sk)
+{
+	struct tcp_comp_context *ctx = comp_get_ctx(sk);
+
+	if (ctx->tx.in_tcp_sendpages) {
+		ctx->sk_write_space(sk);
+		return;
+	}
+
+	if (!sk->sk_write_pending && tcp_comp_is_write_pending(ctx)) {
+		gfp_t sk_allocation = sk->sk_allocation;
+		int rc;
+
+		sk->sk_allocation = GFP_ATOMIC;
+		rc = tcp_comp_push_pending_sg(sk, MSG_DONTWAIT | MSG_NOSIGNAL);
+		sk->sk_allocation = sk_allocation;
+
+		if (rc < 0)
+			return;
+	}
+
+	ctx->sk_write_space(sk);
+}
+
 void tcp_init_compression(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
@@ -83,20 +494,46 @@ void tcp_init_compression(struct sock *sk)
 	if (!ctx)
 		return;
 
+	sg_init_table(ctx->tx.sg_data, ARRAY_SIZE(ctx->tx.sg_data));
+
+	ctx->sk_write_space = sk->sk_write_space;
 	ctx->sk_proto = sk->sk_prot;
 	WRITE_ONCE(sk->sk_prot, &tcp_prot_override);
+	sk->sk_write_space = tcp_comp_write_space;
 
 	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
 
 	sock_set_flag(sk, SOCK_COMP);
 }
 
+static void free_sg(struct sock *sk, struct scatterlist *sg)
+{
+	while (sg) {
+		sk_mem_uncharge(sk, sg->length);
+		put_page(sg_page(sg));
+		sg = sg_next(sg);
+	}
+}
+
+static void tcp_comp_context_tx_free(struct tcp_comp_context *ctx)
+{
+	kfree(ctx->tx.cworkspace);
+	ctx->tx.cworkspace = NULL;
+
+	kvfree(ctx->tx.plaintext_data);
+	ctx->tx.plaintext_data = NULL;
+
+	kvfree(ctx->tx.compressed_data);
+	ctx->tx.compressed_data = NULL;
+}
+
 static void tcp_comp_context_free(struct rcu_head *head)
 {
 	struct tcp_comp_context *ctx;
 
 	ctx = container_of(head, struct tcp_comp_context, rcu);
 
+	tcp_comp_context_tx_free(ctx);
 	kfree(ctx);
 }
 
@@ -108,6 +545,11 @@ void tcp_cleanup_compression(struct sock *sk)
 	if (!ctx || !sock_flag(sk, SOCK_COMP))
 		return;
 
+	if (ctx->tx.partially_send) {
+		free_sg(sk, ctx->tx.partially_send);
+		ctx->tx.partially_send = NULL;
+	}
+
 	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
 	call_rcu(&ctx->rcu, tcp_comp_context_free);
 }
hulk inclusion
category: feature
bugzilla: NA
DTS: #659
CVE: NA
-------------------------------------------------
This patch implements software-level decompression for receiving tcp messages. The compressed TCP payload is decompressed after it is received.
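The receive side feeds each parsed chunk into the zstd streaming decoder, as tcp_comp_decompress() does with skbs handed over by the strparser. A userspace analogue using libzstd follows (illustrative helper, not part of the series; the ZSTD_DStream is assumed created with ZSTD_createDStream()):

#include <stdio.h>
#include <zstd.h>

/* Feed one "as received" compressed chunk to the streaming decoder.
 * comp/comp_len are assumed to come from the send-side sketch in the
 * previous patch; plain/cap is assumed large enough for the output.
 */
static size_t decompress_chunk(ZSTD_DStream *ds, const void *comp,
			       size_t comp_len, void *plain, size_t cap)
{
	ZSTD_inBuffer in = { comp, comp_len, 0 };
	ZSTD_outBuffer out = { plain, cap, 0 };

	while (in.pos < in.size) {
		size_t ret = ZSTD_decompressStream(ds, &out, &in);

		if (ZSTD_isError(ret))
			return 0;	/* kernel path aborts with EBADMSG */
	}
	return out.pos;			/* plaintext bytes produced */
}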
Signed-off-by: Wang Yufen <wangyufen@huawei.com>
---
 net/ipv4/Kconfig    |   4 +-
 net/ipv4/tcp_comp.c | 378 +++++++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 379 insertions(+), 3 deletions(-)
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 96f2311..d1b1f68 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -757,7 +757,9 @@ config TCP_MD5SIG
 
 config TCP_COMP
 	bool "TCP: Transport Layer Compression support"
-	depends on !SMC && ZSTD_COMPRESS=y
+	depends on !SMC && CRYPTO_ZSTD=y
+	select STREAM_PARSER
+
 	---help---
 	  Enable kernel payload compression support for TCP protocol. This allows
 	  payload compression handling of the TCP protocol to be done in-kernel.
diff --git a/net/ipv4/tcp_comp.c b/net/ipv4/tcp_comp.c
index 5fe8acd..e7f6094 100644
--- a/net/ipv4/tcp_comp.c
+++ b/net/ipv4/tcp_comp.c
@@ -6,11 +6,15 @@
  */
 
 #include <net/tcp.h>
+#include <net/strparser.h>
 #include <linux/zstd.h>
 
 #define TCP_COMP_MAX_PADDING	64
-#define TCP_COMP_SCRATCH_SIZE	65400
+#define TCP_COMP_SCRATCH_SIZE	65535
 #define TCP_COMP_MAX_CSIZE	(TCP_COMP_SCRATCH_SIZE + TCP_COMP_MAX_PADDING)
+#define TCP_COMP_ALLOC_ORDER	get_order(65536)
+#define TCP_COMP_MAX_WINDOWLOG	17
+#define TCP_COMP_MAX_INPUT	(1 << TCP_COMP_MAX_WINDOWLOG)
 
 #define TCP_COMP_SEND_PENDING	1
 #define ZSTD_COMP_DEFAULT_LEVEL	1
@@ -36,6 +40,20 @@ struct tcp_comp_context_tx {
 	bool in_tcp_sendpages;
 };
 
+struct tcp_comp_context_rx {
+	ZSTD_DStream *dstream;
+	void *dworkspace;
+	void *plaintext_data;
+	void *compressed_data;
+	void *remaining_data;
+
+	size_t data_offset;
+	struct strparser strp;
+	void (*saved_data_ready)(struct sock *sk);
+	struct sk_buff *pkt;
+	bool decompressed;
+};
+
 struct tcp_comp_context {
 	struct rcu_head rcu;
 
@@ -43,6 +61,7 @@ struct tcp_comp_context {
 	void (*sk_write_space)(struct sock *sk);
 
 	struct tcp_comp_context_tx tx;
+	struct tcp_comp_context_rx rx;
 
 	unsigned long flags;
 };
@@ -449,12 +468,344 @@ static int tcp_comp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	return copied ? copied : err;
 }
 
+static struct sk_buff *comp_wait_data(struct sock *sk, int flags,
+				      long timeo, int *err)
+{
+	struct tcp_comp_context *ctx = comp_get_ctx(sk);
+	struct sk_buff *skb;
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
+	while (!(skb = ctx->rx.pkt)) {
+		if (sk->sk_err) {
+			*err = sock_error(sk);
+			return NULL;
+		}
+
+		if (!skb_queue_empty(&sk->sk_receive_queue)) {
+			__strp_unpause(&ctx->rx.strp);
+			if (ctx->rx.pkt)
+				return ctx->rx.pkt;
+		}
+
+		if (sk->sk_shutdown & RCV_SHUTDOWN)
+			return NULL;
+
+		if (sock_flag(sk, SOCK_DONE))
+			return NULL;
+
+		if ((flags & MSG_DONTWAIT) || !timeo) {
+			*err = -EAGAIN;
+			return NULL;
+		}
+
+		add_wait_queue(sk_sleep(sk), &wait);
+		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+		sk_wait_event(sk, &timeo, ctx->rx.pkt != skb, &wait);
+		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+		remove_wait_queue(sk_sleep(sk), &wait);
+
+		/* Handle signals */
+		if (signal_pending(current)) {
+			*err = sock_intr_errno(timeo);
+			return NULL;
+		}
+	}
+
+	return skb;
+}
+
+static bool comp_advance_skb(struct sock *sk, struct sk_buff *skb,
+			     unsigned int len)
+{
+	struct tcp_comp_context *ctx = comp_get_ctx(sk);
+	struct strp_msg *rxm = strp_msg(skb);
+
+	if (len < rxm->full_len) {
+		rxm->offset += len;
+		rxm->full_len -= len;
+		return false;
+	}
+
+	/* Finished with message */
+	ctx->rx.pkt = NULL;
+	kfree_skb(skb);
+	__strp_unpause(&ctx->rx.strp);
+
+	return true;
+}
+
+static int tcp_comp_rx_context_init(struct tcp_comp_context *ctx)
+{
+	int dsize;
+
+	dsize = ZSTD_DStreamWorkspaceBound(TCP_COMP_MAX_INPUT);
+	if (dsize <= 0)
+		return -EINVAL;
+
+	ctx->rx.dworkspace = kmalloc(dsize, GFP_KERNEL);
+	if (!ctx->rx.dworkspace)
+		return -ENOMEM;
+
+	ctx->rx.dstream = ZSTD_initDStream(TCP_COMP_MAX_INPUT,
+					   ctx->rx.dworkspace, dsize);
+	if (!ctx->rx.dstream)
+		goto err_dstream;
+
+	ctx->rx.plaintext_data = kvmalloc(TCP_COMP_MAX_CSIZE * 32, GFP_KERNEL);
+	if (!ctx->rx.plaintext_data)
+		goto err_dstream;
+
+	ctx->rx.compressed_data = kvmalloc(TCP_COMP_MAX_CSIZE, GFP_KERNEL);
+	if (!ctx->rx.compressed_data)
+		goto err_compressed;
+
+	ctx->rx.remaining_data = kvmalloc(TCP_COMP_MAX_CSIZE, GFP_KERNEL);
+	if (!ctx->rx.remaining_data)
+		goto err_remaining;
+
+	ctx->rx.data_offset = 0;
+
+	return 0;
+
+err_remaining:
+	kvfree(ctx->rx.compressed_data);
+	ctx->rx.compressed_data = NULL;
+err_compressed:
+	kvfree(ctx->rx.plaintext_data);
+	ctx->rx.plaintext_data = NULL;
+err_dstream:
+	kfree(ctx->rx.dworkspace);
+	ctx->rx.dworkspace = NULL;
+
+	return -ENOMEM;
+}
+
+static void *tcp_comp_get_rx_stream(struct sock *sk)
+{
+	struct tcp_comp_context *ctx = comp_get_ctx(sk);
+
+	if (!ctx->rx.plaintext_data)
+		tcp_comp_rx_context_init(ctx);
+
+	return ctx->rx.plaintext_data;
+}
+
+static int tcp_comp_decompress(struct sock *sk, struct sk_buff *skb)
+{
+	struct tcp_comp_context *ctx = comp_get_ctx(sk);
+	const int plen = skb->len;
+	struct strp_msg *rxm;
+	ZSTD_outBuffer outbuf;
+	ZSTD_inBuffer inbuf;
+	int len;
+	void *to;
+
+	to = tcp_comp_get_rx_stream(sk);
+	if (!to)
+		return -ENOSPC;
+
+	if (skb_linearize_cow(skb))
+		return -ENOMEM;
+
+	if (plen + ctx->rx.data_offset > TCP_COMP_MAX_CSIZE)
+		return -ENOMEM;
+
+	if (ctx->rx.data_offset)
+		memcpy(ctx->rx.compressed_data, ctx->rx.remaining_data,
+		       ctx->rx.data_offset);
+
+	memcpy((char *)ctx->rx.compressed_data + ctx->rx.data_offset,
+	       skb->data, plen);
+
+	inbuf.src = ctx->rx.compressed_data;
+	inbuf.pos = 0;
+	inbuf.size = plen + ctx->rx.data_offset;
+	ctx->rx.data_offset = 0;
+
+	outbuf.dst = ctx->rx.plaintext_data;
+	outbuf.pos = 0;
+	outbuf.size = TCP_COMP_MAX_CSIZE * 32;
+
+	while (1) {
+		size_t ret;
+
+		to = outbuf.dst;
+
+		ret = ZSTD_decompressStream(ctx->rx.dstream, &outbuf, &inbuf);
+		if (ZSTD_isError(ret))
+			return -EIO;
+
+		len = outbuf.pos - plen;
+		if (len > skb_tailroom(skb))
+			len = skb_tailroom(skb);
+
+		__skb_put(skb, len);
+		rxm = strp_msg(skb);
+		rxm->full_len += len;
+
+		len += plen;
+		skb_copy_to_linear_data(skb, to, len);
+
+		while ((to += len, outbuf.pos -= len) > 0) {
+			struct page *pages;
+			skb_frag_t *frag;
+
+			if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
+				return -EMSGSIZE;
+
+			frag = skb_shinfo(skb)->frags +
+			       skb_shinfo(skb)->nr_frags;
+			pages = alloc_pages(__GFP_NOWARN | GFP_KERNEL | __GFP_COMP,
+					    TCP_COMP_ALLOC_ORDER);
+
+			if (!pages)
+				return -ENOMEM;
+
+			__skb_frag_set_page(frag, pages);
+			len = PAGE_SIZE << TCP_COMP_ALLOC_ORDER;
+			if (outbuf.pos < len)
+				len = outbuf.pos;
+
+			frag->page_offset = 0;
+			skb_frag_size_set(frag, len);
+			memcpy(skb_frag_address(frag), to, len);
+
+			skb->truesize += len;
+			skb->data_len += len;
+			skb->len += len;
+			rxm->full_len += len;
+			skb_shinfo(skb)->nr_frags++;
+		}
+
+		if (ret == 0)
+			break;
+
+		if (inbuf.pos >= plen || !inbuf.pos) {
+			if (inbuf.pos < inbuf.size) {
+				memcpy((char *)ctx->rx.remaining_data,
+				       (char *)inbuf.src + inbuf.pos,
+				       inbuf.size - inbuf.pos);
+				ctx->rx.data_offset = inbuf.size - inbuf.pos;
+			}
+			break;
+		}
+	}
+	return 0;
+}
+
 static int tcp_comp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 			    int nonblock, int flags, int *addr_len)
 {
 	struct tcp_comp_context *ctx = comp_get_ctx(sk);
+	struct strp_msg *rxm;
+	struct sk_buff *skb;
+	ssize_t copied = 0;
+	int target, err = 0;
+	long timeo;
+
+	flags |= nonblock;
+
+	if (unlikely(flags & MSG_ERRQUEUE))
+		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
+
+	lock_sock(sk);
+
+	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+	timeo = sock_rcvtimeo(sk, flags & MSG_WAITALL);
+
+	do {
+		int chunk = 0;
+
+		skb = comp_wait_data(sk, flags, timeo, &err);
+		if (!skb)
+			goto recv_end;
+
+		if (!ctx->rx.decompressed) {
+			err = tcp_comp_decompress(sk, skb);
+			if (err < 0) {
+				if (err != -ENOSPC)
+					tcp_comp_err_abort(sk, EBADMSG);
+				goto recv_end;
+			}
+			ctx->rx.decompressed = true;
+		}
+		rxm = strp_msg(skb);
+
+		chunk = min_t(unsigned int, rxm->full_len, len);
+
+		err = skb_copy_datagram_msg(skb, rxm->offset, msg,
+					    chunk);
+		if (err < 0)
+			goto recv_end;
+
+		copied += chunk;
+		len -= chunk;
+		if (likely(!(flags & MSG_PEEK)))
+			comp_advance_skb(sk, skb, chunk);
+		else
+			break;
 
-	return ctx->sk_proto->recvmsg(sk, msg, len, nonblock, flags, addr_len);
+		if (copied >= target && !ctx->rx.pkt)
+			break;
+	} while (len > 0);
+
+recv_end:
+	release_sock(sk);
+	return copied ? : err;
+}
+
+bool comp_stream_read(const struct sock *sk)
+{
+	struct tcp_comp_context *ctx = comp_get_ctx(sk);
+
+	if (ctx->rx.pkt)
+		return true;
+
+	return false;
+}
+
+static void comp_data_ready(struct sock *sk)
+{
+	struct tcp_comp_context *ctx = comp_get_ctx(sk);
+
+	strp_data_ready(&ctx->rx.strp);
+}
+
+static void comp_queue(struct strparser *strp, struct sk_buff *skb)
+{
+	struct tcp_comp_context *ctx = comp_get_ctx(strp->sk);
+
+	ctx->rx.decompressed = false;
+	ctx->rx.pkt = skb;
+	strp_pause(strp);
+	ctx->rx.saved_data_ready(strp->sk);
+}
+
+static int comp_read_size(struct strparser *strp, struct sk_buff *skb)
+{
+	struct strp_msg *rxm = strp_msg(skb);
+
+	if (rxm->offset > skb->len)
+		return 0;
+
+	return skb->len;
+}
+
+void comp_setup_strp(struct sock *sk, struct tcp_comp_context *ctx)
+{
+	struct strp_callbacks cb;
+
+	memset(&cb, 0, sizeof(cb));
+	cb.rcv_msg = comp_queue;
+	cb.parse_msg = comp_read_size;
+	strp_init(&ctx->rx.strp, sk, &cb);
+
+	write_lock_bh(&sk->sk_callback_lock);
+	ctx->rx.saved_data_ready = sk->sk_data_ready;
+	sk->sk_data_ready = comp_data_ready;
+	write_unlock_bh(&sk->sk_callback_lock);
+
+	strp_check_rcv(&ctx->rx.strp);
 }
 
 static void tcp_comp_write_space(struct sock *sk)
@@ -504,6 +855,7 @@ void tcp_init_compression(struct sock *sk)
 	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
 
 	sock_set_flag(sk, SOCK_COMP);
+	comp_setup_strp(sk, ctx);
 }
 
 static void free_sg(struct sock *sk, struct scatterlist *sg)
@@ -527,6 +879,21 @@ static void tcp_comp_context_tx_free(struct tcp_comp_context *ctx)
 	ctx->tx.compressed_data = NULL;
 }
 
+static void tcp_comp_context_rx_free(struct tcp_comp_context *ctx)
+{
+	kfree(ctx->rx.dworkspace);
+	ctx->rx.dworkspace = NULL;
+
+	kvfree(ctx->rx.plaintext_data);
+	ctx->rx.plaintext_data = NULL;
+
+	kvfree(ctx->rx.compressed_data);
+	ctx->rx.compressed_data = NULL;
+
+	kvfree(ctx->rx.remaining_data);
+	ctx->rx.remaining_data = NULL;
+}
+
 static void tcp_comp_context_free(struct rcu_head *head)
 {
 	struct tcp_comp_context *ctx;
@@ -534,6 +901,7 @@ static void tcp_comp_context_free(struct rcu_head *head)
 	ctx = container_of(head, struct tcp_comp_context, rcu);
 
 	tcp_comp_context_tx_free(ctx);
+	tcp_comp_context_rx_free(ctx);
 	kfree(ctx);
 }
 
@@ -550,6 +918,11 @@ void tcp_cleanup_compression(struct sock *sk)
 		ctx->tx.partially_send = NULL;
 	}
 
+	if (ctx->rx.pkt) {
+		kfree_skb(ctx->rx.pkt);
+		ctx->rx.pkt = NULL;
+	}
+
 	rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
 	call_rcu(&ctx->rcu, tcp_comp_context_free);
 }
@@ -559,6 +932,7 @@ int tcp_comp_init(void)
 	tcp_prot_override = tcp_prot;
 	tcp_prot_override.sendmsg = tcp_comp_sendmsg;
 	tcp_prot_override.recvmsg = tcp_comp_recvmsg;
+	tcp_prot_override.stream_memory_read = comp_stream_read;
 
 	return 0;
 }
hulk inclusion
category: feature
bugzilla: NA
DTS: #659
CVE: NA
-------------------------------------------------
Open configs for tcp compression
Signed-off-by: Wang Yufen <wangyufen@huawei.com>
---
 arch/arm64/configs/hulk_defconfig      | 5 ++++-
 arch/arm64/configs/openeuler_defconfig | 5 ++++-
 arch/x86/configs/hulk_defconfig        | 3 +++
 arch/x86/configs/openeuler_defconfig   | 5 ++++-
 4 files changed, 15 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/configs/hulk_defconfig b/arch/arm64/configs/hulk_defconfig
index 8030db9..190e977 100644
--- a/arch/arm64/configs/hulk_defconfig
+++ b/arch/arm64/configs/hulk_defconfig
@@ -1103,6 +1103,7 @@ CONFIG_DEFAULT_CUBIC=y
 # CONFIG_DEFAULT_RENO is not set
 CONFIG_DEFAULT_TCP_CONG="cubic"
 CONFIG_TCP_MD5SIG=y
+CONFIG_TCP_COMP=y
 CONFIG_IPV6=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
@@ -5312,7 +5313,7 @@ CONFIG_CRYPTO_LZO=y
 # CONFIG_CRYPTO_842 is not set
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
-# CONFIG_CRYPTO_ZSTD is not set
+CONFIG_CRYPTO_ZSTD=y
 
 #
 # Random Number Generation
@@ -5416,6 +5417,8 @@ CONFIG_LZO_DECOMPRESS=y
 CONFIG_LZ4_COMPRESS=m
 CONFIG_LZ4HC_COMPRESS=m
 CONFIG_LZ4_DECOMPRESS=y
+CONFIG_ZSTD_COMPRESS=y
+CONFIG_ZSTD_DECOMPRESS=y
 CONFIG_XZ_DEC=y
 CONFIG_XZ_DEC_X86=y
 CONFIG_XZ_DEC_POWERPC=y
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 8cfe2c3..1511883 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -1095,6 +1095,7 @@ CONFIG_DEFAULT_CUBIC=y
 # CONFIG_DEFAULT_RENO is not set
 CONFIG_DEFAULT_TCP_CONG="cubic"
 CONFIG_TCP_MD5SIG=y
+CONFIG_TCP_COMP=y
 CONFIG_IPV6=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
@@ -5654,7 +5655,7 @@ CONFIG_CRYPTO_LZO=y
 # CONFIG_CRYPTO_842 is not set
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
-# CONFIG_CRYPTO_ZSTD is not set
+CONFIG_CRYPTO_ZSTD=y
 
 #
 # Random Number Generation
@@ -5756,6 +5757,8 @@ CONFIG_LZO_DECOMPRESS=y
 CONFIG_LZ4_COMPRESS=m
 CONFIG_LZ4HC_COMPRESS=m
 CONFIG_LZ4_DECOMPRESS=y
+CONFIG_ZSTD_COMPRESS=y
+CONFIG_ZSTD_DECOMPRESS=y
 CONFIG_XZ_DEC=y
 CONFIG_XZ_DEC_X86=y
 CONFIG_XZ_DEC_POWERPC=y
diff --git a/arch/x86/configs/hulk_defconfig b/arch/x86/configs/hulk_defconfig
index 38ac853..6478544 100644
--- a/arch/x86/configs/hulk_defconfig
+++ b/arch/x86/configs/hulk_defconfig
@@ -1121,6 +1121,7 @@ CONFIG_DEFAULT_CUBIC=y
 # CONFIG_DEFAULT_RENO is not set
 CONFIG_DEFAULT_TCP_CONG="cubic"
 CONFIG_TCP_MD5SIG=y
+CONFIG_TCP_COMP=y
 CONFIG_IPV6=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
@@ -7194,6 +7195,8 @@ CONFIG_ZLIB_DEFLATE=y
 CONFIG_LZO_COMPRESS=y
 CONFIG_LZO_DECOMPRESS=y
 CONFIG_LZ4_DECOMPRESS=y
+CONFIG_ZSTD_COMPRESS=y
+CONFIG_ZSTD_DECOMPRESS=y
 CONFIG_XZ_DEC=y
 CONFIG_XZ_DEC_X86=y
 CONFIG_XZ_DEC_POWERPC=y
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
index 9bfab4e..80f5769 100644
--- a/arch/x86/configs/openeuler_defconfig
+++ b/arch/x86/configs/openeuler_defconfig
@@ -1117,6 +1117,7 @@ CONFIG_DEFAULT_CUBIC=y
 # CONFIG_DEFAULT_RENO is not set
 CONFIG_DEFAULT_TCP_CONG="cubic"
 CONFIG_TCP_MD5SIG=y
+CONFIG_TCP_COMP=y
 CONFIG_IPV6=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
@@ -7104,7 +7105,7 @@ CONFIG_CRYPTO_LZO=y
 # CONFIG_CRYPTO_842 is not set
 # CONFIG_CRYPTO_LZ4 is not set
 # CONFIG_CRYPTO_LZ4HC is not set
-# CONFIG_CRYPTO_ZSTD is not set
+CONFIG_CRYPTO_ZSTD=y
 
 #
 # Random Number Generation
@@ -7203,6 +7204,8 @@ CONFIG_ZLIB_DEFLATE=y
 CONFIG_LZO_COMPRESS=y
 CONFIG_LZO_DECOMPRESS=y
 CONFIG_LZ4_DECOMPRESS=y
+CONFIG_ZSTD_COMPRESS=y
+CONFIG_ZSTD_DECOMPRESS=y
 CONFIG_XZ_DEC=y
 CONFIG_XZ_DEC_X86=y
 CONFIG_XZ_DEC_POWERPC=y