From: Wang Yufen <wangyufen@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8KU3B
--------------------------------
For a TCP socket in a sockmap, if the packet transmission rate is very fast and the packet receiving rate is very slow, a large number of packets pile up in the ingress queue on the receiving side. As a result, memory is exhausted and the system OOMs.
To fix this, add to sk_rmem_alloc when an sk_msg is queued into the ingress queue, subtract from sk_rmem_alloc when an sk_msg is dequeued from the ingress queue, and check sk_rmem_alloc against sk_rcvbuf at the beginning of bpf_tcp_ingress().
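
In outline, the accounting behaves as in the simplified user-space sketch below. It is illustrative only, not the kernel code: struct fake_sock, ingress_enqueue() and ingress_dequeue() are made-up names for this example, and the real patch uses atomic ops on sk->sk_rmem_alloc under the socket lock.

  #include <stdio.h>

  struct fake_sock {
  	int sk_rmem_alloc;	/* bytes currently held in the ingress queue */
  	int sk_rcvbuf;		/* receive buffer limit */
  };

  /* mirrors the new check in bpf_tcp_ingress(): refuse to queue past the limit */
  static int ingress_enqueue(struct fake_sock *sk, int bytes)
  {
  	if (sk->sk_rmem_alloc >= sk->sk_rcvbuf)
  		return -1;			/* the patch returns -EAGAIN here */
  	sk->sk_rmem_alloc += bytes;		/* atomic_add() in the patch */
  	return 0;
  }

  /* mirrors sk_msg_recvmsg()/purge: release the charge when data leaves */
  static void ingress_dequeue(struct fake_sock *sk, int bytes)
  {
  	sk->sk_rmem_alloc -= bytes;		/* atomic_sub() in the patch */
  }

  int main(void)
  {
  	struct fake_sock sk = { .sk_rmem_alloc = 0, .sk_rcvbuf = 4096 };
  	int i;

  	/* fast sender, slow receiver: the third 2 KB chunk is rejected */
  	for (i = 0; i < 3; i++)
  		printf("enqueue 2048: %s\n",
  		       ingress_enqueue(&sk, 2048) ? "rejected" : "queued");

  	ingress_dequeue(&sk, 2048);		/* receiver finally reads */
  	printf("enqueue 2048 after dequeue: %s\n",
  	       ingress_enqueue(&sk, 2048) ? "rejected" : "queued");
  	return 0;
  }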
Signed-off-by: Wang Yufen <wangyufen@huawei.com>
Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
---
 net/core/skmsg.c   | 10 +++++++++-
 net/ipv4/tcp_bpf.c | 10 ++++++++++
 2 files changed, 19 insertions(+), 1 deletion(-)
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 93ecfceac1bc..54a8300b4b3e 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -444,8 +444,12 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
 			if (likely(!peek)) {
 				sge->offset += copy;
 				sge->length -= copy;
-				if (!msg_rx->skb)
+				if (!msg_rx->skb) {
+#if IS_ENABLED(CONFIG_NETACC_TERRACE)
+					atomic_sub(copy, &sk->sk_rmem_alloc);
+#endif
 					sk_mem_uncharge(sk, copy);
+				}
 				msg_rx->sg.size -= copy;
 
 				if (!sge->length) {
@@ -771,6 +775,10 @@ static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
 
 	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
 		list_del(&msg->list);
+#if IS_ENABLED(CONFIG_NETACC_TERRACE)
+		if (!msg->skb)
+			atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc);
+#endif
 		sk_msg_free(psock->sk, msg);
 		kfree(msg);
 	}
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 53b0d62fd2c2..835e65ae361e 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -43,6 +43,13 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
 		return -ENOMEM;
 
 	lock_sock(sk);
+#if IS_ENABLED(CONFIG_NETACC_TERRACE)
+	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
+		kfree(tmp);
+		release_sock(sk);
+		return -EAGAIN;
+	}
+#endif
 	tmp->sg.start = msg->sg.start;
 	i = msg->sg.start;
 	do {
@@ -75,6 +82,9 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
 	if (!ret) {
 		msg->sg.start = i;
 		sk_psock_queue_msg(psock, tmp);
+#if IS_ENABLED(CONFIG_NETACC_TERRACE)
+		atomic_add(tmp->sg.size, &sk->sk_rmem_alloc);
+#endif
 		sk_psock_data_ready(sk, psock);
 	} else {
 		sk_msg_free(sk, tmp);