Hi all,
These patches resolve the xfs problems found when running
xfstests. For details on how the problems are fixed, see the
links below:
https://lore.kernel.org/linux-xfs/154697976479.2839.3584921201780682011.stg…
https://lore.kernel.org/linux-xfs/157232182246.593721.4902116478429075171.s…
Thanks
---
Darrick J. Wong (8):
xfs: abort xattr scrub if fatal signals are pending
xfs: scrub should flag dir/attr offsets that aren't mappable with
xfs_dablk_t
xfs: check directory name validity
xfs: check attribute name validity
xfs: check attribute leaf block structure
xfs: namecheck attribute names before listing them
xfs: namecheck directory entry names before listing them
xfs: replace -EIO with -EFSCORRUPTED for corrupt metadata
fs/xfs/libxfs/xfs_attr.c | 17 +++++++++
fs/xfs/libxfs/xfs_attr.h | 2 +-
fs/xfs/libxfs/xfs_attr_leaf.c | 68 +++++++++++++++++++++++++++++++++--
fs/xfs/libxfs/xfs_attr_leaf.h | 4 +--
fs/xfs/libxfs/xfs_bmap.c | 6 ++--
fs/xfs/libxfs/xfs_dir2.c | 17 +++++++++
fs/xfs/libxfs/xfs_dir2.h | 1 +
fs/xfs/libxfs/xfs_types.c | 11 ++++++
fs/xfs/libxfs/xfs_types.h | 1 +
fs/xfs/scrub/attr.c | 11 ++++++
fs/xfs/scrub/bmap.c | 27 ++++++++++++++
fs/xfs/scrub/dir.c | 6 ++++
fs/xfs/xfs_attr_inactive.c | 6 ++--
fs/xfs/xfs_attr_list.c | 60 ++++++++++++++++++++-----------
fs/xfs/xfs_dir2_readdir.c | 27 +++++++++++---
fs/xfs/xfs_dquot.c | 2 +-
16 files changed, 228 insertions(+), 38 deletions(-)
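For reviewers skimming the series: the heart of the name-validity
patches is a small check applied before dirent/attr names are trusted.
Below is a minimal sketch of the idea; the helpers the series actually
adds are xfs_dir2_namecheck()/xfs_attr_namecheck(), and this is not the
exact upstream code:

  /* Sketch: reject names that are empty, too long, or contain
   * bytes that can never appear in a valid name. (Kernel context;
   * MAXNAMELEN is the xfs limit, which counts the trailing NUL.) */
  static bool namecheck_sketch(const void *name, size_t length)
  {
          if (length == 0 || length >= MAXNAMELEN)
                  return false;
          /* no '/' separators or NUL bytes inside a name */
          return !memchr(name, '/', length) && !memchr(name, 0, length);
  }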
--
2.18.4
From: Eric Dumazet <edumazet@google.com>
stable inclusion
from stable-v4.19.246
commit ddec440133913a6b8880e53b896d521a4b7ff24f
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I5C3A9
CVE: CVE-2022-32296
--------------------------------
commit 190cc82489f46f9d88e73c81a47e14f80a791e1a upstream.
RFC 6056 (Recommendations for Transport-Protocol Port Randomization)
provides a good summary of why source port selection needs extra care.
David Dworken reminded us that Linux implements Algorithm 3,
as described in RFC 6056 3.3.3.
Quoting David:
In the context of the web, this creates an interesting info leak where
websites can count how many TCP connections a user's computer is
establishing over time. For example, this allows a website to count
exactly how many subresources a third party website loaded.
This also allows:
- Distinguishing between different users behind a VPN based on
distinct source port ranges.
- Tracking users over time across multiple networks.
- Covert communication channels between different browsers/browser
profiles running on the same computer.
- Tracking what applications are running on a computer based on
the pattern of how fast source ports are getting incremented.
Section 3.3.4 describes an enhancement that reduces an
attacker's ability to use the basic information currently
stored in the shared 'u32 hint'.
This change also decreases the collision rate when
multiple applications need to connect() to
different destinations.
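For readers unfamiliar with RFC 6056, here is a minimal sketch of the
Algorithm 4 idea described above. The names (pick_offset(), f_hash,
g_hash, table) are illustrative stand-ins, not the kernel's code; the
patch derives both values from port_offset and hash_32():

  #define TABLE_LENGTH 256       /* the patch uses 2^8 perturbation buckets */
  static unsigned int table[TABLE_LENGTH];  /* seeded once with random data */

  /* F selects the starting point, G selects which per-bucket counter
   * carries state between connect() calls, so unrelated destinations
   * no longer share (and leak through) a single global 'hint'. */
  static unsigned int pick_offset(unsigned int f_hash, unsigned int g_hash,
                                  unsigned int remaining)
  {
          unsigned int index = g_hash % TABLE_LENGTH;

          /* per-bucket counter + F-hash, reduced to the ephemeral range */
          return (table[index] + f_hash) % remaining;
  }

  /* On a successful bind the kernel bumps table[index] by i + 2,
   * preserving the parity of the next starting offset. */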
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: David Dworken <ddworken@google.com>
Cc: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
[SG: Adjusted context]
Signed-off-by: Stefan Ghinea <stefan.ghinea@windriver.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Conflicts:
net/ipv4/inet_hashtables.c
Signed-off-by: Baisong Zhong <zhongbaisong@huawei.com>
Reviewed-by: Wei Yongjun <weiyongjun1@huawei.com>
Reviewed-by: Xiu Jianfeng <xiujianfeng@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
---
net/ipv4/inet_hashtables.c | 20 +++++++++++++++++---
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index bd0108aaa45b..72b8b6f44add 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -666,6 +666,17 @@ void inet_unhash(struct sock *sk)
}
EXPORT_SYMBOL_GPL(inet_unhash);
+/* RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm
+ * Note that we use 32bit integers (vs RFC 'short integers')
+ * because 2^16 is not a multiple of num_ephemeral and this
+ * property might be used by clever attacker.
+ * RFC claims using TABLE_LENGTH=10 buckets gives an improvement,
+ * we use 256 instead to really give more isolation and
+ * privacy, this only consumes 1 KB of kernel memory.
+ */
+#define INET_TABLE_PERTURB_SHIFT 8
+static u32 table_perturb[1 << INET_TABLE_PERTURB_SHIFT];
+
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk, u64 port_offset,
int (*check_established)(struct inet_timewait_death_row *,
@@ -679,8 +690,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
struct inet_bind_bucket *tb;
u32 remaining, offset;
int ret, i, low, high;
- static u32 hint;
int l3mdev;
+ u32 index;
if (port) {
head = &hinfo->bhash[inet_bhashfn(net, port,
@@ -707,7 +718,10 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
if (likely(remaining > 1))
remaining &= ~1U;
- offset = hint + port_offset;
+ net_get_random_once(table_perturb, sizeof(table_perturb));
+ index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT);
+
+ offset = READ_ONCE(table_perturb[index]) + port_offset;
offset %= remaining;
/* In first pass we try ports of @low parity.
* inet_csk_get_port() does the opposite choice.
@@ -762,7 +776,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
return -EADDRNOTAVAIL;
ok:
- hint += i + 2;
+ WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
/* Head lock still held and bh's disabled */
inet_bind_hash(sk, tb, port);
--
2.25.1
From: Michael Ellerman <mpe@ellerman.id.au>
mainline inclusion
from mainline-v5.19-rc2
commit 8e1278444446fc97778a5e5c99bca1ce0bbc5ec9
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I5C43D
CVE: CVE-2022-32981
--------------------------------
The ptrace PEEKUSR/POKEUSR (aka PEEKUSER/POKEUSER) API allows a process
to read/write registers of another process.
To get/set a register, the API takes an index into an imaginary address
space called the "USER area", where the registers of the process are
laid out in some fashion.
The kernel then maps that index to a particular register in its own data
structures and gets/sets the value.
The API only allows a single machine-word to be read/written at a time.
So 4 bytes on 32-bit kernels and 8 bytes on 64-bit kernels.
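As a concrete illustration of the API (a hypothetical tracer-side
sketch, not part of the patch): PTRACE_PEEKUSER takes a byte offset
into the USER area, which arch_ptrace() divides by sizeof(long) to
recover the register index, e.g. PT_FPR0:

  #include <sys/ptrace.h>
  #include <sys/types.h>
  #include <asm/ptrace.h>         /* PT_FPR0 on powerpc */

  /* Read the first word of FPR0 from a stopped tracee. Callers
   * should clear errno first to tell a real -1 value from an error. */
  static long read_fpr0_word(pid_t pid)
  {
          /* addr is a byte offset; the kernel uses addr / sizeof(long)
           * as the USER-area index (PT_FPR0 here). */
          return ptrace(PTRACE_PEEKUSER, pid,
                        (void *)(PT_FPR0 * sizeof(long)), 0);
  }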
The way floating point registers (FPRs) are addressed is somewhat
complicated, because double precision float values are 64-bit even on
32-bit CPUs. That means on 32-bit kernels each FPR occupies two
word-sized locations in the USER area. On 64-bit kernels each FPR
occupies one word-sized location in the USER area.
Internally the kernel stores the FPRs in an array of u64s, or if VSX is
enabled, an array of pairs of u64s where one half of each pair stores
the FPR. Which half of the pair stores the FPR depends on the kernel's
endianness.
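A sketch of the storage layout just described, assuming a VSX-enabled
64-bit kernel (TS_FPR()/TS_FPROFFSET are the real names; the values
shown are inferred from the description above):

  /* With VSX: each FPR shares a 16-byte slot with half of a VSX
   * register, so the storage is an array of pairs of u64s. */
  u64 fpr[32][2];
  /* TS_FPROFFSET picks the half that holds the FPR: 0 on big
   * endian, 1 on little endian. */
  #define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET]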
To handle the different layouts of the FPRs depending on VSX/no-VSX and
big/little endian, the TS_FPR() macro was introduced.
Unfortunately the TS_FPR() macro does not take into account the fact
that the addressing of each FPR differs between 32-bit and 64-bit
kernels. It just takes the index into the "USER area" passed from
userspace and indexes into the fp_state.fpr array.
On 32-bit there are 64 indexes that address FPRs, but only 32 entries in
the fp_state.fpr array, meaning the user can read/write 256 bytes past
the end of the array. Because the fp_state sits in the middle of the
thread_struct there are various fields that can be overwritten,
including some pointers. As such it may be exploitable.
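To make the 256-byte figure concrete, a worked sketch under the
32-bit, non-VSX assumptions above:

  /* Kernel storage: 32 double-precision FPRs, 8 bytes each. */
  u64 fpr[32];                    /* 256 bytes total */

  /* USER-area view on 32-bit: each FPR spans two 4-byte words, so
   * PT_FPSCR - PT_FPR0 == 64 and fpidx ranges over 0..63. Correct
   * access treats the storage as 64 32-bit slots: */
  u32 *slots = (u32 *)fpr;        /* slots[0..63] map exactly onto fpr */

  /* The buggy code indexed the u64 array with fpidx directly, so
   * fpidx == 63 touches fpr[63], whose last byte sits at offset
   * 64 * 8 = 512 -- 256 bytes past the 256-byte array. */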
It has also been observed to cause systems to hang or otherwise
misbehave when using gdbserver, and is probably the root cause of this
report which could not be easily reproduced:
https://lore.kernel.org/linuxppc-dev/dc38afe9-6b78-f3f5-666b-986939e40fc6@k…
Rather than trying to make the TS_FPR() macro even more complicated to
fix the bug, or add more macros, instead add a special-case for 32-bit
kernels. This is more obvious and hopefully avoids a similar bug
happening again in future.
Note that because 32-bit kernels never have VSX enabled the code doesn't
need to consider TS_FPRWIDTH/OFFSET at all. Add a BUILD_BUG_ON() to
ensure that 32-bit && VSX is never enabled.
Fixes: 87fec0514f61 ("powerpc: PTRACE_PEEKUSR/PTRACE_POKEUSER of FPR registers in little endian builds")
Cc: stable@vger.kernel.org # v3.13+
Reported-by: Ariel Miculas <ariel.miculas@belden.com>
Tested-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220609133245.573565-1-mpe@ellerman.id.au
Conflicts:
arch/powerpc/kernel/ptrace/ptrace-fpu.c
arch/powerpc/kernel/ptrace/ptrace.c
Signed-off-by: Yipeng Zou <zouyipeng@huawei.com>
Reviewed-by: Zhang Jianhua <chris.zjh@huawei.com>
Reviewed-by: Liao Chang <liaochang1@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
---
arch/powerpc/kernel/ptrace.c | 25 +++++++++++++++++--------
1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index e08b32ccf1d9..d245f0af412a 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -2980,6 +2980,9 @@ long arch_ptrace(struct task_struct *child, long request,
void __user *datavp = (void __user *) data;
unsigned long __user *datalp = datavp;
+ // ptrace_get/put_fpr() rely on PPC32 and VSX being incompatible
+ BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_VSX));
+
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
@@ -3006,10 +3009,13 @@ long arch_ptrace(struct task_struct *child, long request,
unsigned int fpidx = index - PT_FPR0;
flush_fp_to_thread(child);
- if (fpidx < (PT_FPSCR - PT_FPR0))
- memcpy(&tmp, &child->thread.TS_FPR(fpidx),
- sizeof(long));
- else
+ if (fpidx < (PT_FPSCR - PT_FPR0)) {
+ if (IS_ENABLED(CONFIG_PPC32))
+ // On 32-bit the index we are passed refers to 32-bit words
+ tmp = ((u32 *)child->thread.fp_state.fpr)[fpidx];
+ else
+ memcpy(&tmp, &child->thread.TS_FPR(fpidx), sizeof(long));
+ } else
tmp = child->thread.fp_state.fpscr;
}
ret = put_user(tmp, datalp);
@@ -3039,10 +3045,13 @@ long arch_ptrace(struct task_struct *child, long request,
unsigned int fpidx = index - PT_FPR0;
flush_fp_to_thread(child);
- if (fpidx < (PT_FPSCR - PT_FPR0))
- memcpy(&child->thread.TS_FPR(fpidx), &data,
- sizeof(long));
- else
+ if (fpidx < (PT_FPSCR - PT_FPR0)) {
+ if (IS_ENABLED(CONFIG_PPC32))
+ // On 32-bit the index we are passed refers to 32-bit words
+ ((u32 *)child->thread.fp_state.fpr)[fpidx] = data;
+ else
+ memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long));
+ } else
child->thread.fp_state.fpscr = data;
ret = 0;
}
--
2.25.1
From: Xin Long <lucien.xin@gmail.com>
stable inclusion
from stable-v4.19.224
commit af6e6e58f7ebf86b4e7201694b1e4f3a62cbc3ec
category: bugfix
bugzilla: 186701, https://gitee.com/src-openeuler/kernel/issues/I5CAM2
CVE: CVE-2022-20154
--------------------------------
[ Upstream commit 5ec7d18d1813a5bead0b495045606c93873aecbb ]
This patch delays the endpoint free by calling call_rcu() to fix
another use-after-free issue in sctp_sock_dump():
BUG: KASAN: use-after-free in __lock_acquire+0x36d9/0x4c20
Call Trace:
__lock_acquire+0x36d9/0x4c20 kernel/locking/lockdep.c:3218
lock_acquire+0x1ed/0x520 kernel/locking/lockdep.c:3844
__raw_spin_lock_bh include/linux/spinlock_api_smp.h:135 [inline]
_raw_spin_lock_bh+0x31/0x40 kernel/locking/spinlock.c:168
spin_lock_bh include/linux/spinlock.h:334 [inline]
__lock_sock+0x203/0x350 net/core/sock.c:2253
lock_sock_nested+0xfe/0x120 net/core/sock.c:2774
lock_sock include/net/sock.h:1492 [inline]
sctp_sock_dump+0x122/0xb20 net/sctp/diag.c:324
sctp_for_each_transport+0x2b5/0x370 net/sctp/socket.c:5091
sctp_diag_dump+0x3ac/0x660 net/sctp/diag.c:527
__inet_diag_dump+0xa8/0x140 net/ipv4/inet_diag.c:1049
inet_diag_dump+0x9b/0x110 net/ipv4/inet_diag.c:1065
netlink_dump+0x606/0x1080 net/netlink/af_netlink.c:2244
__netlink_dump_start+0x59a/0x7c0 net/netlink/af_netlink.c:2352
netlink_dump_start include/linux/netlink.h:216 [inline]
inet_diag_handler_cmd+0x2ce/0x3f0 net/ipv4/inet_diag.c:1170
__sock_diag_cmd net/core/sock_diag.c:232 [inline]
sock_diag_rcv_msg+0x31d/0x410 net/core/sock_diag.c:263
netlink_rcv_skb+0x172/0x440 net/netlink/af_netlink.c:2477
sock_diag_rcv+0x2a/0x40 net/core/sock_diag.c:274
This issue occurs when asoc is peeled off and the old sk is freed after
getting it by asoc->base.sk and before calling lock_sock(sk).
To prevent the sk free, as a holder of the sk, ep should be alive when
calling lock_sock(). This patch uses call_rcu() and moves sock_put and
ep free into sctp_endpoint_destroy_rcu(), so that it's safe to try to
hold the ep under rcu_read_lock in sctp_transport_traverse_process().
If sctp_endpoint_hold() returns true, it means this ep is still alive
and we have held it and can continue to dump it; if it returns false,
it means this ep is dead and can be freed after rcu_read_unlock, and
we should skip it.
In sctp_sock_dump(), after locking the sk, if this ep is different from
tsp->asoc->ep, it means that during this dump the asoc was peeled off
before calling lock_sock(), and the sk should be skipped; if this ep is
the same as tsp->asoc->ep, it means no peeloff happened on this asoc,
and due to lock_sock, no peeloff will happen either until release_sock.
Note that delaying endpoint free won't delay the port release, as the
port release happens in sctp_endpoint_destroy() before calling call_rcu().
Also, freeing endpoint by call_rcu() makes it safe to access the sk by
asoc->base.sk in sctp_assocs_seq_show() and sctp_rcv().
Thanks to Jones for bringing this issue up.
v1->v2:
- improve the changelog.
- add kfree(ep) into sctp_endpoint_destroy_rcu(), as Jakub noticed.
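For reference, the try-hold-under-RCU pattern the fix relies on, as a
minimal generic sketch; obj_lookup()-style callers, use(), obj_put()
and struct my_obj are hypothetical stand-ins, and the patch applies
this to sctp_endpoint via sctp_endpoint_hold():

  static int process_one(struct my_obj *obj)
  {
          rcu_read_lock();
          /* freeing is deferred by call_rcu(), so the memory is still
           * valid here and we may try to take a reference */
          if (!refcount_inc_not_zero(&obj->refcnt)) {
                  rcu_read_unlock();      /* already dying: skip it */
                  return -EAGAIN;
          }
          rcu_read_unlock();

          /* safe to sleep or lock_sock() now: we hold our own ref */
          use(obj);
          obj_put(obj);           /* the final put frees via call_rcu() */
          return 0;
  }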
Reported-by: syzbot+9276d76e83e3bcde6c99@syzkaller.appspotmail.com
Reported-by: Lee Jones <lee.jones@linaro.org>
Fixes: d25adbeb0cdb ("sctp: fix an use-after-free issue in sctp_sock_dump")
Signed-off-by: Xin Long <lucien.xin@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Huang Guobin <huangguobin4@huawei.com>
Reviewed-by: Wei Yongjun <weiyongjun1@huawei.com>
Reviewed-by: Xiu Jianfeng <xiujianfeng@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
---
include/net/sctp/sctp.h | 6 +++---
include/net/sctp/structs.h | 3 ++-
net/sctp/diag.c | 12 ++++++------
net/sctp/endpointola.c | 23 +++++++++++++++--------
net/sctp/socket.c | 23 +++++++++++++++--------
5 files changed, 41 insertions(+), 26 deletions(-)
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index fef8c3372aee..6d201c6700be 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -118,6 +118,7 @@ extern struct percpu_counter sctp_sockets_allocated;
int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
+typedef int (*sctp_callback_t)(struct sctp_endpoint *, struct sctp_transport *, void *);
void sctp_transport_walk_start(struct rhashtable_iter *iter);
void sctp_transport_walk_stop(struct rhashtable_iter *iter);
struct sctp_transport *sctp_transport_get_next(struct net *net,
@@ -128,9 +129,8 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
struct net *net,
const union sctp_addr *laddr,
const union sctp_addr *paddr, void *p);
-int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
- int (*cb_done)(struct sctp_transport *, void *),
- struct net *net, int *pos, void *p);
+int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
+ struct net *net, int *pos, void *p);
int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p);
int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
struct sctp_info *info);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 2882bc7a5b4b..18f9924aa250 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1348,6 +1348,7 @@ struct sctp_endpoint {
u32 secid;
u32 peer_secid;
+ struct rcu_head rcu;
};
/* Recover the outter endpoint structure. */
@@ -1363,7 +1364,7 @@ static inline struct sctp_endpoint *sctp_ep(struct sctp_ep_common *base)
struct sctp_endpoint *sctp_endpoint_new(struct sock *, gfp_t);
void sctp_endpoint_free(struct sctp_endpoint *);
void sctp_endpoint_put(struct sctp_endpoint *);
-void sctp_endpoint_hold(struct sctp_endpoint *);
+int sctp_endpoint_hold(struct sctp_endpoint *ep);
void sctp_endpoint_add_asoc(struct sctp_endpoint *, struct sctp_association *);
struct sctp_association *sctp_endpoint_lookup_assoc(
const struct sctp_endpoint *ep,
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index 8767405de9fa..0a9db0a7f423 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -307,9 +307,8 @@ static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
return err;
}
-static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
+static int sctp_sock_dump(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
{
- struct sctp_endpoint *ep = tsp->asoc->ep;
struct sctp_comm_param *commp = p;
struct sock *sk = ep->base.sk;
struct sk_buff *skb = commp->skb;
@@ -319,6 +318,8 @@ static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
int err = 0;
lock_sock(sk);
+ if (ep != tsp->asoc->ep)
+ goto release;
list_for_each_entry(assoc, &ep->asocs, asocs) {
if (cb->args[4] < cb->args[1])
goto next;
@@ -361,9 +362,8 @@ static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
return err;
}
-static int sctp_sock_filter(struct sctp_transport *tsp, void *p)
+static int sctp_sock_filter(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
{
- struct sctp_endpoint *ep = tsp->asoc->ep;
struct sctp_comm_param *commp = p;
struct sock *sk = ep->base.sk;
const struct inet_diag_req_v2 *r = commp->r;
@@ -521,8 +521,8 @@ static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
goto done;
- sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump,
- net, &pos, &commp);
+ sctp_transport_traverse_process(sctp_sock_filter, sctp_sock_dump,
+ net, &pos, &commp);
cb->args[2] = pos;
done:
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 8640dedcf64f..c4068451b9c7 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -242,6 +242,18 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
}
/* Final destructor for endpoint. */
+static void sctp_endpoint_destroy_rcu(struct rcu_head *head)
+{
+ struct sctp_endpoint *ep = container_of(head, struct sctp_endpoint, rcu);
+ struct sock *sk = ep->base.sk;
+
+ sctp_sk(sk)->ep = NULL;
+ sock_put(sk);
+
+ kfree(ep);
+ SCTP_DBG_OBJCNT_DEC(ep);
+}
+
static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
{
struct sock *sk;
@@ -275,18 +287,13 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
if (sctp_sk(sk)->bind_hash)
sctp_put_port(sk);
- sctp_sk(sk)->ep = NULL;
- /* Give up our hold on the sock */
- sock_put(sk);
-
- kfree(ep);
- SCTP_DBG_OBJCNT_DEC(ep);
+ call_rcu(&ep->rcu, sctp_endpoint_destroy_rcu);
}
/* Hold a reference to an endpoint. */
-void sctp_endpoint_hold(struct sctp_endpoint *ep)
+int sctp_endpoint_hold(struct sctp_endpoint *ep)
{
- refcount_inc(&ep->base.refcnt);
+ return refcount_inc_not_zero(&ep->base.refcnt);
}
/* Release a reference to an endpoint and clean up if there are
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index cfa54e56add9..0e979a273dcd 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5059,11 +5059,12 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
}
EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
-int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
- int (*cb_done)(struct sctp_transport *, void *),
- struct net *net, int *pos, void *p) {
+int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
+ struct net *net, int *pos, void *p)
+{
struct rhashtable_iter hti;
struct sctp_transport *tsp;
+ struct sctp_endpoint *ep;
int ret;
again:
@@ -5072,26 +5073,32 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
- ret = cb(tsp, p);
- if (ret)
- break;
+ ep = tsp->asoc->ep;
+ if (sctp_endpoint_hold(ep)) { /* asoc can be peeled off */
+ ret = cb(ep, tsp, p);
+ if (ret)
+ break;
+ sctp_endpoint_put(ep);
+ }
(*pos)++;
sctp_transport_put(tsp);
}
sctp_transport_walk_stop(&hti);
if (ret) {
- if (cb_done && !cb_done(tsp, p)) {
+ if (cb_done && !cb_done(ep, tsp, p)) {
(*pos)++;
+ sctp_endpoint_put(ep);
sctp_transport_put(tsp);
goto again;
}
+ sctp_endpoint_put(ep);
sctp_transport_put(tsp);
}
return ret;
}
-EXPORT_SYMBOL_GPL(sctp_for_each_transport);
+EXPORT_SYMBOL_GPL(sctp_transport_traverse_process);
/* 7.2.1 Association Status (SCTP_STATUS)
--
2.25.1