From: Zhen Lei <thunder.leizhen@huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I6WAZX
--------------------------------
When the number of cores is greater than the number of ECMDQs, each NUMA node is assigned fewer ECMDQs than it has cores. As a result, iterating over only the first smmu->nr_ecmdq cores does not cover all ECMDQs.
For example:

 ---------------------------------------
|       Node0       |       Node1       |
|---------------------------------------|
|  0    1    2    3 |  4    5    6    7 |  CPU ID
|---------------------------------------|
|    0       1      |    2       3      |  ECMDQ ID
 ---------------------------------------
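
To make the coverage gap concrete, here is a small stand-alone sketch of the
example above (the array name and the 8-CPU/4-ECMDQ layout are illustrative
only, not the driver's data structures):

	#include <stdio.h>

	#define NR_CPUS		8
	#define NR_ECMDQS	4

	/* CPU -> ECMDQ assignment from the table above */
	static const int ecmdq_of_cpu[NR_CPUS] = { 0, 0, 1, 1, 2, 2, 3, 3 };

	int main(void)
	{
		int covered[NR_ECMDQS] = { 0 };
		int i;

		/* Old walk: only the first nr_ecmdq CPU slots are visited */
		for (i = 0; i < NR_ECMDQS; i++)
			covered[ecmdq_of_cpu[i]] = 1;

		for (i = 0; i < NR_ECMDQS; i++)
			printf("ECMDQ %d: %s\n", i,
			       covered[i] ? "visited" : "missed");

		return 0;
	}

With this layout the old walk only ever touches ECMDQ 0 and 1, while ECMDQ 2
and 3 are left untouched; that is why the reset path in this patch walks every
possible CPU and skips the CPUs that do not own their ECMDQ.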
Fixes: 3965519baff5 ("iommu/arm-smmu-v3: Add support for less than one ECMDQ per core")
Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 114 ++++++++++++--------
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h |   3 +-
 2 files changed, 73 insertions(+), 44 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index be8cc53659f8..1ee14a59a3d6 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -386,7 +386,7 @@ static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu)
 	if (smmu->ecmdq_enabled) {
 		struct arm_smmu_ecmdq *ecmdq;
 
-		ecmdq = *this_cpu_ptr(smmu->ecmdq);
+		ecmdq = *this_cpu_ptr(smmu->ecmdqs);
 
 		return &ecmdq->cmdq;
 	}
@@ -485,7 +485,7 @@ static void arm_smmu_ecmdq_skip_err(struct arm_smmu_device *smmu)
 	for (i = 0; i < smmu->nr_ecmdq; i++) {
 		unsigned long flags;
 
-		ecmdq = *per_cpu_ptr(smmu->ecmdq, i);
+		ecmdq = *per_cpu_ptr(smmu->ecmdqs, i);
 		q = &ecmdq->cmdq.q;
 
 		prod = readl_relaxed(q->prod_reg);
@@ -4549,9 +4549,50 @@ static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
 	return ret;
 }
 
+static int arm_smmu_ecmdq_reset(struct arm_smmu_device *smmu)
+{
+	int i, cpu, ret = 0;
+	u32 reg;
+
+	if (!smmu->nr_ecmdq)
+		return 0;
+
+	i = 0;
+	for_each_possible_cpu(cpu) {
+		struct arm_smmu_ecmdq *ecmdq;
+		struct arm_smmu_queue *q;
+
+		ecmdq = *per_cpu_ptr(smmu->ecmdqs, cpu);
+		if (ecmdq != per_cpu_ptr(smmu->ecmdq, cpu))
+			continue;
+
+		q = &ecmdq->cmdq.q;
+		i++;
+
+		if (WARN_ON(q->llq.prod != q->llq.cons)) {
+			q->llq.prod = 0;
+			q->llq.cons = 0;
+		}
+		writeq_relaxed(q->q_base, ecmdq->base + ARM_SMMU_ECMDQ_BASE);
+		writel_relaxed(q->llq.prod, ecmdq->base + ARM_SMMU_ECMDQ_PROD);
+		writel_relaxed(q->llq.cons, ecmdq->base + ARM_SMMU_ECMDQ_CONS);
+
+		/* enable ecmdq */
+		writel(ECMDQ_PROD_EN | q->llq.prod, q->prod_reg);
+		ret = readl_relaxed_poll_timeout(q->cons_reg, reg, reg & ECMDQ_CONS_ENACK,
+						 1, ARM_SMMU_POLL_TIMEOUT_US);
+		if (ret) {
+			dev_err(smmu->dev, "ecmdq[%d] enable failed\n", i);
+			smmu->ecmdq_enabled = 0;
+			break;
+		}
+	}
+
+	return ret;
+}
+
 static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool resume)
 {
-	int i;
 	int ret;
 	u32 reg, enables;
 	struct arm_smmu_cmdq_ent cmd;
@@ -4599,31 +4640,7 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool resume)
 	writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
 	writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
 
-	for (i = 0; i < smmu->nr_ecmdq; i++) {
-		struct arm_smmu_ecmdq *ecmdq;
-		struct arm_smmu_queue *q;
-
-		ecmdq = *per_cpu_ptr(smmu->ecmdq, i);
-		q = &ecmdq->cmdq.q;
-
-		if (WARN_ON(q->llq.prod != q->llq.cons)) {
-			q->llq.prod = 0;
-			q->llq.cons = 0;
-		}
-		writeq_relaxed(q->q_base, ecmdq->base + ARM_SMMU_ECMDQ_BASE);
-		writel_relaxed(q->llq.prod, ecmdq->base + ARM_SMMU_ECMDQ_PROD);
-		writel_relaxed(q->llq.cons, ecmdq->base + ARM_SMMU_ECMDQ_CONS);
-
-		/* enable ecmdq */
-		writel(ECMDQ_PROD_EN | q->llq.prod, q->prod_reg);
-		ret = readl_relaxed_poll_timeout(q->cons_reg, reg, reg & ECMDQ_CONS_ENACK,
-					 1, ARM_SMMU_POLL_TIMEOUT_US);
-		if (ret) {
-			dev_err(smmu->dev, "ecmdq[%d] enable failed\n", i);
-			smmu->ecmdq_enabled = 0;
-			break;
-		}
-	}
+	arm_smmu_ecmdq_reset(smmu);
 
 	enables = CR0_CMDQEN;
 	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
@@ -4723,10 +4740,11 @@ static int arm_smmu_ecmdq_layout(struct arm_smmu_device *smmu)
 	ecmdq = devm_alloc_percpu(smmu->dev, *ecmdq);
 	if (!ecmdq)
 		return -ENOMEM;
+	smmu->ecmdq = ecmdq;
 
 	if (num_possible_cpus() <= smmu->nr_ecmdq) {
 		for_each_possible_cpu(cpu)
-			*per_cpu_ptr(smmu->ecmdq, cpu) = per_cpu_ptr(ecmdq, cpu);
+			*per_cpu_ptr(smmu->ecmdqs, cpu) = per_cpu_ptr(ecmdq, cpu);
 
 		/* A core requires at most one ECMDQ */
 		smmu->nr_ecmdq = num_possible_cpus();
@@ -4763,7 +4781,16 @@ static int arm_smmu_ecmdq_layout(struct arm_smmu_device *smmu)
 		 * may be left due to truncation rounding.
 		 */
 		nr_ecmdqs[node] = nr_cpus_node(node) * nr_remain / num_possible_cpus();
+	}
+
+	for_each_node(node) {
+		if (!nr_cpus_node(node))
+			continue;
+
+		nr_remain -= nr_ecmdqs[node];
+
+		/* An ECMDQ has been reserved for each node at above [1] */
+		nr_ecmdqs[node]++;
 	}
 
 	/* Divide the remaining ECMDQs */
@@ -4781,25 +4808,23 @@ static int arm_smmu_ecmdq_layout(struct arm_smmu_device *smmu)
 	}
 
 	for_each_node(node) {
-		int i, round, shared = 0;
+		int i, round, shared;
 
 		if (!nr_cpus_node(node))
 			continue;
 
-		/* An ECMDQ has been reserved for each node at above [1] */
-		nr_ecmdqs[node]++;
-
+		shared = 0;
 		if (nr_ecmdqs[node] < nr_cpus_node(node))
 			shared = 1;
 
 		i = 0;
 		for_each_cpu(cpu, cpumask_of_node(node)) {
 			round = i % nr_ecmdqs[node];
-			if (i++ < nr_ecmdqs[node]) {
+			if (i++ < nr_ecmdqs[node])
 				ecmdqs[round] = per_cpu_ptr(ecmdq, cpu);
+			else
 				ecmdqs[round]->cmdq.shared = shared;
-			}
-			*per_cpu_ptr(smmu->ecmdq, cpu) = ecmdqs[round];
+			*per_cpu_ptr(smmu->ecmdqs, cpu) = ecmdqs[round];
 		}
 	}
@@ -4823,6 +4848,8 @@ static int arm_smmu_ecmdq_probe(struct arm_smmu_device *smmu)
 	numq = 1 << FIELD_GET(IDR6_LOG2NUMQ, reg);
 	smmu->nr_ecmdq = nump * numq;
 	gap = ECMDQ_CP_RRESET_SIZE >> FIELD_GET(IDR6_LOG2NUMQ, reg);
+	if (!smmu->nr_ecmdq)
+		return -EOPNOTSUPP;
 
 	smmu_dma_base = (vmalloc_to_pfn(smmu->base) << PAGE_SHIFT);
 	cp_regs = ioremap(smmu_dma_base + ARM_SMMU_ECMDQ_CP_BASE, PAGE_SIZE);
@@ -4855,8 +4882,8 @@ static int arm_smmu_ecmdq_probe(struct arm_smmu_device *smmu)
 	if (!cp_base)
 		return -ENOMEM;
 
-	smmu->ecmdq = devm_alloc_percpu(smmu->dev, struct arm_smmu_ecmdq *);
-	if (!smmu->ecmdq)
+	smmu->ecmdqs = devm_alloc_percpu(smmu->dev, struct arm_smmu_ecmdq *);
+	if (!smmu->ecmdqs)
 		return -ENOMEM;
 
 	ret = arm_smmu_ecmdq_layout(smmu);
@@ -4870,7 +4897,7 @@ static int arm_smmu_ecmdq_probe(struct arm_smmu_device *smmu)
 		struct arm_smmu_ecmdq *ecmdq;
 		struct arm_smmu_queue *q;
 
-		ecmdq = *per_cpu_ptr(smmu->ecmdq, cpu);
+		ecmdq = *per_cpu_ptr(smmu->ecmdqs, cpu);
 		q = &ecmdq->cmdq.q;
 
 		/*
@@ -4878,10 +4905,11 @@ static int arm_smmu_ecmdq_probe(struct arm_smmu_device *smmu)
 		 * CPUs. The CPUs that are not selected are not showed in
 		 * cpumask_of_node(node), their 'ecmdq' may be NULL.
 		 *
-		 * (q->ecmdq_prod & ECMDQ_PROD_EN) indicates that the ECMDQ is
-		 * shared by multiple cores and has been initialized.
+		 * (ecmdq != per_cpu_ptr(smmu->ecmdq, cpu)) indicates that the
+		 * ECMDQ is shared by multiple cores and should be initialized
+		 * only by the first owner.
 		 */
-		if (!ecmdq || (q->ecmdq_prod & ECMDQ_PROD_EN))
+		if (!ecmdq || (ecmdq != per_cpu_ptr(smmu->ecmdq, cpu)))
 			continue;
 
 		ecmdq->base = cp_base + addr;
@@ -5324,7 +5352,7 @@ static int arm_smmu_ecmdq_disable(struct device *dev)
 	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
 
 	for (i = 0; i < smmu->nr_ecmdq; i++) {
-		ecmdq = *per_cpu_ptr(smmu->ecmdq, i);
+		ecmdq = *per_cpu_ptr(smmu->ecmdqs, i);
 		q = &ecmdq->cmdq.q;
 
 		prod = readl_relaxed(q->prod_reg);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index f680cd6dd3bd..919473d2217b 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -718,7 +718,8 @@ struct arm_smmu_device {
 		u32 nr_ecmdq;
 		u32 ecmdq_enabled;
 	};
-	struct arm_smmu_ecmdq *__percpu *ecmdq;
+	struct arm_smmu_ecmdq *__percpu *ecmdqs;
+	struct arm_smmu_ecmdq __percpu *ecmdq;
 
 	struct arm_smmu_cmdq cmdq;
 	struct arm_smmu_evtq evtq;
From: "Darrick J. Wong" djwong@kernel.org
mainline inclusion
from mainline-v6.3-rc6
commit 22ed903eee23a5b174e240f1cdfa9acf393a5210
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I6X4UN
CVE: CVE-2023-2124
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
syzbot detected a crash during log recovery:
XFS (loop0): Mounting V5 Filesystem bfdc47fc-10d8-4eed-a562-11a831b3f791
XFS (loop0): Torn write (CRC failure) detected at log block 0x180. Truncating head block from 0x200.
XFS (loop0): Starting recovery (logdev: internal)
==================================================================
BUG: KASAN: slab-out-of-bounds in xfs_btree_lookup_get_block+0x15c/0x6d0 fs/xfs/libxfs/xfs_btree.c:1813
Read of size 8 at addr ffff88807e89f258 by task syz-executor132/5074
CPU: 0 PID: 5074 Comm: syz-executor132 Not tainted 6.2.0-rc1-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 10/26/2022
Call Trace:
 <TASK>
 __dump_stack lib/dump_stack.c:88 [inline]
 dump_stack_lvl+0x1b1/0x290 lib/dump_stack.c:106
 print_address_description+0x74/0x340 mm/kasan/report.c:306
 print_report+0x107/0x1f0 mm/kasan/report.c:417
 kasan_report+0xcd/0x100 mm/kasan/report.c:517
 xfs_btree_lookup_get_block+0x15c/0x6d0 fs/xfs/libxfs/xfs_btree.c:1813
 xfs_btree_lookup+0x346/0x12c0 fs/xfs/libxfs/xfs_btree.c:1913
 xfs_btree_simple_query_range+0xde/0x6a0 fs/xfs/libxfs/xfs_btree.c:4713
 xfs_btree_query_range+0x2db/0x380 fs/xfs/libxfs/xfs_btree.c:4953
 xfs_refcount_recover_cow_leftovers+0x2d1/0xa60 fs/xfs/libxfs/xfs_refcount.c:1946
 xfs_reflink_recover_cow+0xab/0x1b0 fs/xfs/xfs_reflink.c:930
 xlog_recover_finish+0x824/0x920 fs/xfs/xfs_log_recover.c:3493
 xfs_log_mount_finish+0x1ec/0x3d0 fs/xfs/xfs_log.c:829
 xfs_mountfs+0x146a/0x1ef0 fs/xfs/xfs_mount.c:933
 xfs_fs_fill_super+0xf95/0x11f0 fs/xfs/xfs_super.c:1666
 get_tree_bdev+0x400/0x620 fs/super.c:1282
 vfs_get_tree+0x88/0x270 fs/super.c:1489
 do_new_mount+0x289/0xad0 fs/namespace.c:3145
 do_mount fs/namespace.c:3488 [inline]
 __do_sys_mount fs/namespace.c:3697 [inline]
 __se_sys_mount+0x2d3/0x3c0 fs/namespace.c:3674
 do_syscall_x64 arch/x86/entry/common.c:50 [inline]
 do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80
 entry_SYSCALL_64_after_hwframe+0x63/0xcd
RIP: 0033:0x7f89fa3f4aca
Code: 83 c4 08 5b 5d c3 66 2e 0f 1f 84 00 00 00 00 00 c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 49 89 ca b8 a5 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 c0 ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007fffd5fb5ef8 EFLAGS: 00000206 ORIG_RAX: 00000000000000a5
RAX: ffffffffffffffda RBX: 00646975756f6e2c RCX: 00007f89fa3f4aca
RDX: 0000000020000100 RSI: 0000000020009640 RDI: 00007fffd5fb5f10
RBP: 00007fffd5fb5f10 R08: 00007fffd5fb5f50 R09: 000000000000970d
R10: 0000000000200800 R11: 0000000000000206 R12: 0000000000000004
R13: 0000555556c6b2c0 R14: 0000000000200800 R15: 00007fffd5fb5f50
 </TASK>
The fuzzed image contains an AGF with an obviously garbage agf_refcount_level value of 32, and a dirty log with a buffer log item for that AGF. The ondisk AGF has a higher LSN than the recovered log item. xlog_recover_buf_commit_pass2 reads the buffer, compares the LSNs, and decides to skip replay because the ondisk buffer appears to be newer.
Unfortunately, the ondisk buffer is corrupt, but recovery just read the buffer with no buffer ops specified:
	error = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno,
			buf_f->blf_len, buf_flags, &bp, NULL);
Skipping the buffer leaves its contents in memory unverified. This sets us up for a kernel crash because xfs_refcount_recover_cow_leftovers reads the buffer (which is still around in XBF_DONE state, so no read verification) and creates a refcountbt cursor of height 32. This is impossible so we run off the end of the cursor object and crash.
Fix this by invoking the verifier on all skipped buffers and aborting log recovery if the ondisk buffer is corrupt. It might be smarter to force replay the log item atop the buffer and then see if it'll pass the write verifier (like ext4 does) but for now let's go with the conservative option where we stop immediately.
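
The shape of that check, as a stand-alone sketch (the struct and function
below are simplified stand-ins, not the XFS types; the real change is in the
hunk that follows):

	/* Simplified stand-in for a buffer with optional read-verifier ops. */
	struct toy_buf {
		int error;				/* set by the verifier on corruption */
		void (*verify_read)(struct toy_buf *bp);	/* NULL if read without ops */
	};

	/*
	 * Called when the ondisk LSN says the buffer is newer than the log item,
	 * i.e. replay is about to be skipped.  Returns 0 if the skipped buffer
	 * checks out, or the verifier's error so log recovery can be aborted.
	 */
	int verify_skipped_buffer(struct toy_buf *bp)
	{
		if (!bp->verify_read)
			return 0;
		bp->verify_read(bp);
		return bp->error;
	}

Aborting here is the conservative option described above; force-replaying the
item and re-running the write verifier is the alternative left for later.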
Link: https://syzkaller.appspot.com/bug?extid=7e9494b8b399902e994e
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
Signed-off-by: Long Li <leo.lilong@huawei.com>
Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
Reviewed-by: Xiu Jianfeng <xiujianfeng@huawei.com>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
---
 fs/xfs/xfs_buf_item_recover.c | 10 ++++++++++
 1 file changed, 10 insertions(+)
diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c
index 4b2ad8dce690..d892125eb022 100644
--- a/fs/xfs/xfs_buf_item_recover.c
+++ b/fs/xfs/xfs_buf_item_recover.c
@@ -943,6 +943,16 @@ xlog_recover_buf_commit_pass2(
 	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
 		trace_xfs_log_recover_buf_skip(log, buf_f);
 		xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
+
+		/*
+		 * We're skipping replay of this buffer log item due to the log
+		 * item LSN being behind the ondisk buffer. Verify the buffer
+		 * contents since we aren't going to run the write verifier.
+		 */
+		if (bp->b_ops) {
+			bp->b_ops->verify_read(bp);
+			error = bp->b_error;
+		}
 		goto out_release;
 	}
From: Pablo Neira Ayuso <pablo@netfilter.org>
stable inclusion
from stable-v5.10.180
commit e044a24447189419c3a7ccc5fa6da7516036dc55
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I71F49
CVE: CVE-2023-32233
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=...
--------------------------------
commit c1592a89942e9678f7d9c8030efa777c0d57edab upstream.
Toggle deleted anonymous sets as inactive in the next generation, so users cannot perform any further updates on them. Clear the generation bitmask in case the transaction is aborted.
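
As a rough model of what "inactive in the next generation" means (the two-bit
mask and helpers below only mirror the idea behind nft_deactivate_next() and
nft_clear(); they are not the kernel macros):

	#include <stdio.h>

	/* Toy generation mask: bit 0 = current generation, bit 1 = next one.
	 * A set bit means "inactive in that generation". */
	struct toy_set { unsigned int genmask; };

	#define GEN_NEXT	0x2U

	static void deactivate_next(struct toy_set *s) { s->genmask |= GEN_NEXT; }
	static void activate(struct toy_set *s)        { s->genmask &= ~GEN_NEXT; }

	int main(void)
	{
		struct toy_set s = { 0 };

		/* NFT_TRANS_PREPARE: the deleted anonymous set disappears from
		 * the next generation, so later updates in the same batch can
		 * no longer touch it. */
		deactivate_next(&s);
		printf("active in next gen: %s\n",
		       (s.genmask & GEN_NEXT) ? "no" : "yes");

		/* Transaction aborted: clear the bit so the set comes back. */
		activate(&s);
		printf("active in next gen: %s\n",
		       (s.genmask & GEN_NEXT) ? "no" : "yes");
		return 0;
	}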
The following KASAN splat shows a set element deletion for a bound anonymous set that has already been removed in the same transaction.
[ 64.921510] ==================================================================
[ 64.923123] BUG: KASAN: wild-memory-access in nf_tables_commit+0xa24/0x1490 [nf_tables]
[ 64.924745] Write of size 8 at addr dead000000000122 by task test/890
[ 64.927903] CPU: 3 PID: 890 Comm: test Not tainted 6.3.0+ #253
[ 64.931120] Call Trace:
[ 64.932699]  <TASK>
[ 64.934292]  dump_stack_lvl+0x33/0x50
[ 64.935908]  ? nf_tables_commit+0xa24/0x1490 [nf_tables]
[ 64.937551]  kasan_report+0xda/0x120
[ 64.939186]  ? nf_tables_commit+0xa24/0x1490 [nf_tables]
[ 64.940814]  nf_tables_commit+0xa24/0x1490 [nf_tables]
[ 64.942452]  ? __kasan_slab_alloc+0x2d/0x60
[ 64.944070]  ? nf_tables_setelem_notify+0x190/0x190 [nf_tables]
[ 64.945710]  ? kasan_set_track+0x21/0x30
[ 64.947323]  nfnetlink_rcv_batch+0x709/0xd90 [nfnetlink]
[ 64.948898]  ? nfnetlink_rcv_msg+0x480/0x480 [nfnetlink]
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Lu Wei <luwei32@huawei.com>
Reviewed-by: Yue Haibing <yuehaibing@huawei.com>
Reviewed-by: Xiu Jianfeng <xiujianfeng@huawei.com>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
---
 include/net/netfilter/nf_tables.h |  1 +
 net/netfilter/nf_tables_api.c     | 12 ++++++++++++
 net/netfilter/nft_dynset.c        |  2 +-
 net/netfilter/nft_lookup.c        |  2 +-
 net/netfilter/nft_objref.c        |  2 +-
 5 files changed, 16 insertions(+), 3 deletions(-)
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 5b6803cd3299..f0870b382678 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -507,6 +507,7 @@ struct nft_set_binding {
 };
 
 enum nft_trans_phase;
+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set);
 void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
 			      struct nft_set_binding *binding,
 			      enum nft_trans_phase phase);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 4806169c2349..936dc1dad2c3 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -4479,12 +4479,24 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
 	}
 }
 
+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set)
+{
+	if (nft_set_is_anonymous(set))
+		nft_clear(ctx->net, set);
+
+	set->use++;
+}
+EXPORT_SYMBOL_GPL(nf_tables_activate_set);
+
 void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
 			      struct nft_set_binding *binding,
 			      enum nft_trans_phase phase)
 {
 	switch (phase) {
 	case NFT_TRANS_PREPARE:
+		if (nft_set_is_anonymous(set))
+			nft_deactivate_next(ctx->net, set);
+
 		set->use--;
 		return;
 	case NFT_TRANS_ABORT:
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 8c45e01fecdd..038588d4d80e 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -233,7 +233,7 @@ static void nft_dynset_activate(const struct nft_ctx *ctx,
 {
 	struct nft_dynset *priv = nft_expr_priv(expr);
 
-	priv->set->use++;
+	nf_tables_activate_set(ctx, priv->set);
 }
 
 static void nft_dynset_destroy(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index b0f558b4fea5..8bc008ff00cb 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -132,7 +132,7 @@ static void nft_lookup_activate(const struct nft_ctx *ctx,
 {
 	struct nft_lookup *priv = nft_expr_priv(expr);
 
-	priv->set->use++;
+	nf_tables_activate_set(ctx, priv->set);
 }
 
 static void nft_lookup_destroy(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index bc104d36d3bb..25157d8cc250 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -180,7 +180,7 @@ static void nft_objref_map_activate(const struct nft_ctx *ctx,
 {
 	struct nft_objref_map *priv = nft_expr_priv(expr);
 
-	priv->set->use++;
+	nf_tables_activate_set(ctx, priv->set);
 }
 
 static void nft_objref_map_destroy(const struct nft_ctx *ctx,