Kernel
fixup CVE-2024-26751
Nikita Shubin (1):
[Backport] ARM: ep93xx: Add terminator to gpiod_lookup_table
arch/arm/mach-ep93xx/core.c | 1 +
1 file changed, 1 insertion(+)
--
2.34.1

[PATCH openEuler-1.0-LTS] ext4: avoid allocating blocks from corrupted group in ext4_mb_find_by_goal()
by ZhaoLong Wang 09 Apr '24
From: Baokun Li <libaokun1(a)huawei.com>
mainline inclusion
from mainline-v6.8-rc3
commit 832698373a25950942c04a512daa652c18a9b513
category: bugfix
bugzilla: 189763
CVE: CVE-2024-26772
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
Places the logic for checking if the group's block bitmap is corrupt under
the protection of the group lock to avoid allocating blocks from the group
with a corrupted block bitmap.
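In essence, this closes a check-then-use window: testing the corruption flag before taking the group lock leaves room for another CPU to mark the bitmap corrupt between the test and the allocation. A minimal standalone sketch of the pattern (pthread stand-ins, not the ext4 API):

    #include <pthread.h>
    #include <stdbool.h>

    struct group {                  /* stand-in for ext4's per-group info */
            pthread_mutex_t lock;   /* plays the role of ext4_lock_group() */
            bool bbitmap_corrupt;   /* plays EXT4_MB_GRP_BBITMAP_CORRUPT() */
    };

    /* Allocate nothing if the group is corrupt; the check sits under
     * the lock, so the flag cannot flip between test and use. */
    static int find_by_goal(struct group *grp)
    {
            int ret = 0;

            pthread_mutex_lock(&grp->lock);
            if (grp->bbitmap_corrupt)
                    goto out;
            /* ... scan the bitmap and take blocks ... */
    out:
            pthread_mutex_unlock(&grp->lock);
            return ret;
    }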
Signed-off-by: Baokun Li <libaokun1(a)huawei.com>
Reviewed-by: Jan Kara <jack(a)suse.cz>
Link: https://lore.kernel.org/r/20240104142040.2835097-8-libaokun1@huawei.com
Signed-off-by: Theodore Ts'o <tytso(a)mit.edu>
Signed-off-by: ZhaoLong Wang <wangzhaolong1(a)huawei.com>
---
fs/ext4/mballoc.c | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index c07289164c12..4c52e74946ab 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1836,12 +1836,10 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
if (err)
return err;
- if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
- ext4_mb_unload_buddy(e4b);
- return 0;
- }
-
ext4_lock_group(ac->ac_sb, group);
+ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
+ goto out;
+
max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
ac->ac_g_ex.fe_len, &ex);
ex.fe_logical = 0xDEADFA11; /* debug value */
@@ -1874,6 +1872,7 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
ac->ac_b_ex = ex;
ext4_mb_use_best_found(ac, e4b);
}
+out:
ext4_unlock_group(ac->ac_sb, group);
ext4_mb_unload_buddy(e4b);
--
2.39.2
This patchset fixes CVE-2024-26771.
The conflicts in patches 2 and 3 are caused by commit 31f4b28f6c41 ("dmaengine:
ti: edma: Add support for handling reserved channels") not being merged;
the patches have been adapted accordingly.
The conflict in patch 4 is a context conflict which does not affect the
effectiveness of this patch.
v2:
Removed CVE dedication for the first 3 patches.
Chuhong Yuan (2):
dmaengine: ti: edma: fix missed failure handling
dmaengine: ti: edma: add missed operations
Kunwu Chan (1):
dmaengine: ti: edma: Add some null pointer checks to the edma_probe
Wei Yongjun (1):
dmaengine: ti: edma: Fix error return code in edma_probe()
drivers/dma/ti/edma.c | 53 +++++++++++++++++++++++++++++--------------
1 file changed, 36 insertions(+), 17 deletions(-)
--
2.34.1

[openeuler:OLK-5.10 9479/30000] mm/dynamic_hugetlb.c:198:55: sparse: sparse: incorrect type in initializer (different address spaces)
by kernel test robot 09 Apr '24
tree: https://gitee.com/openeuler/kernel.git OLK-5.10
head: 03571c277d6443f7ba8712ac6c9073fa9fa0d3bf
commit: a8a836a36072aacbc4aaf08b8b3c8a654dbc0157 [9479/30000] mm/dynamic_hugetlb: establish the dynamic hugetlb feature framework
config: x86_64-randconfig-r132-20240409 (https://download.01.org/0day-ci/archive/20240409/202404091402.qUFwznOY-lkp@…)
compiler: clang version 17.0.6 (https://github.com/llvm/llvm-project 6009708b4367171ccdbf4b5905cb6a803753fe18)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240409/202404091402.qUFwznOY-lkp@…)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags:
| Reported-by: kernel test robot <lkp(a)intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202404091402.qUFwznOY-lkp@intel.com/
sparse warnings: (new ones prefixed by >>)
>> mm/dynamic_hugetlb.c:198:55: sparse: sparse: incorrect type in initializer (different address spaces) @@ expected struct cgroup_subsys_state *css @@ got struct cgroup_subsys_state [noderef] __rcu * @@
mm/dynamic_hugetlb.c:198:55: sparse: expected struct cgroup_subsys_state *css
mm/dynamic_hugetlb.c:198:55: sparse: got struct cgroup_subsys_state [noderef] __rcu *
vim +198 mm/dynamic_hugetlb.c
195
196 int hugetlb_pool_destroy(struct cgroup *cgrp)
197 {
> 198 struct cgroup_subsys_state *css = cgrp->subsys[memory_cgrp_id];
199 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
200 struct dhugetlb_pool *hpool = memcg ? memcg->hpool : NULL;
201 int ret = 0;
202
203 if (!dhugetlb_enabled)
204 return 0;
205
206 if (!hpool || hpool->attach_memcg != memcg)
207 return 0;
208
209 ret = free_hugepage_to_hugetlb(hpool);
210 memcg->hpool = NULL;
211
212 put_hpool(hpool);
213 return ret;
214 }
215
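For context, sparse complains because cgrp->subsys[] carries the __rcu address-space annotation, so a plain-pointer initializer crosses address spaces. A hedged sketch of the usual remedy, assuming the caller holds a lock that protects the pointer (cgroup_mutex here is illustrative, not verified against this call site):

    struct cgroup_subsys_state *css =
            rcu_dereference_protected(cgrp->subsys[memory_cgrp_id],
                                      lockdep_is_held(&cgroup_mutex));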
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
fix CVE-2024-26788
Curtis Klein (1):
[Backport] dmaengine: fsl-qdma: init irq after reg initialization
drivers/dma/fsl-qdma.c | 17 ++++++++---------
1 file changed, 8 insertions(+), 9 deletions(-)
--
2.34.1

[PATCH OLK-5.10] net/sched: act_mirred: use the backlog for mirred ingress
by Zhengchao Shao 09 Apr '24
From: Jakub Kicinski <kuba(a)kernel.org>
mainline inclusion
from mainline-v6.8-rc6
commit 52f671db18823089a02f07efc04efdb2272ddc17
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I9E2LT
CVE: CVE-2024-26740
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
The test Davide added in commit ca22da2fbd69 ("act_mirred: use the backlog
for nested calls to mirred ingress") hangs our testing VMs every 10 or so
runs, with the familiar tcp_v4_rcv -> tcp_v4_rcv deadlock reported by
lockdep.
The problem as previously described by Davide (see Link) is that
if we reverse flow of traffic with the redirect (egress -> ingress)
we may reach the same socket which generated the packet. And we may
still be holding its socket lock. The common solution to such deadlocks
is to put the packet in the Rx backlog, rather than run the Rx path
inline. Do that for all egress -> ingress reversals, not just once
we started to nest mirred calls.
In the past there was a concern that the backlog indirection will
lead to loss of error reporting / less accurate stats. But the current
workaround does not seem to address the issue.
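The decision the patch encodes, restated as a self-contained sketch with stand-in actions (the real code calls dev_queue_xmit, netif_rx, and netif_receive_skb on the skb; comments explain why each branch is safe):

    #include <stdbool.h>
    #include <stdio.h>

    static const char *mirred_forward_path(bool at_ingress, bool want_ingress)
    {
            if (!want_ingress)
                    return "dev_queue_xmit";    /* plain egress transmit */
            if (!at_ingress)
                    return "netif_rx";          /* egress -> ingress reversal:
                                                 * defer via the Rx backlog,
                                                 * since the originating socket
                                                 * lock may still be held */
            return "netif_receive_skb";         /* ingress -> ingress: inline
                                                 * Rx is safe */
    }

    int main(void)
    {
            /* egress -> ingress case takes the backlog path */
            printf("%s\n", mirred_forward_path(false, true));
            return 0;
    }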
Fixes: 53592b364001 ("net/sched: act_mirred: Implement ingress actions")
Cc: Marcelo Ricardo Leitner <marcelo.leitner(a)gmail.com>
Suggested-by: Davide Caratti <dcaratti(a)redhat.com>
Link: https://lore.kernel.org/netdev/33dc43f587ec1388ba456b4915c75f02a8aae226.166…
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
Acked-by: Jamal Hadi Salim <jhs(a)mojatatu.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Conflicts:
net/sched/act_mirred.c
Signed-off-by: Zhengchao Shao <shaozhengchao(a)huawei.com>
---
net/sched/act_mirred.c | 14 +++++---------
.../testing/selftests/net/forwarding/tc_actions.sh | 3 ---
2 files changed, 5 insertions(+), 12 deletions(-)
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 91a19460cb57..66c9f356a876 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -206,18 +206,14 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
return err;
}
-static bool is_mirred_nested(void)
-{
- return unlikely(__this_cpu_read(mirred_nest_level) > 1);
-}
-
-static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
+static int
+tcf_mirred_forward(bool at_ingress, bool want_ingress, struct sk_buff *skb)
{
int err;
if (!want_ingress)
err = dev_queue_xmit(skb);
- else if (is_mirred_nested())
+ else if (!at_ingress)
err = netif_rx(skb);
else
err = netif_receive_skb(skb);
@@ -314,7 +310,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
/* let's the caller reinsert the packet, if possible */
if (use_reinsert) {
res->ingress = want_ingress;
- err = tcf_mirred_forward(res->ingress, skb);
+ err = tcf_mirred_forward(at_ingress, res->ingress, skb);
if (err)
tcf_action_inc_overlimit_qstats(&m->common);
__this_cpu_dec(mirred_nest_level);
@@ -322,7 +318,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
}
}
- err = tcf_mirred_forward(want_ingress, skb2);
+ err = tcf_mirred_forward(at_ingress, want_ingress, skb2);
if (err) {
out:
tcf_action_inc_overlimit_qstats(&m->common);
diff --git a/tools/testing/selftests/net/forwarding/tc_actions.sh b/tools/testing/selftests/net/forwarding/tc_actions.sh
index e396e24d30e0..d6614faf9fe3 100755
--- a/tools/testing/selftests/net/forwarding/tc_actions.sh
+++ b/tools/testing/selftests/net/forwarding/tc_actions.sh
@@ -188,9 +188,6 @@ mirred_egress_to_ingress_tcp_test()
check_err $? "didn't mirred redirect ICMP"
tc_check_packets "dev $h1 ingress" 102 10
check_err $? "didn't drop mirred ICMP"
- local overlimits=$(tc_rule_stats_get ${h1} 101 egress .overlimits)
- test ${overlimits} = 10
- check_err $? "wrong overlimits, expected 10 got ${overlimits}"
tc filter del dev $h1 egress protocol ip pref 100 handle 100 flower
tc filter del dev $h1 egress protocol ip pref 101 handle 101 flower
--
2.34.1
CVE-2021-47144
Jingwen Chen (1):
drm/amd/amdgpu: fix refcount leak
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 3 +++
1 file changed, 3 insertions(+)
--
2.25.1

09 Apr '24
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I9F351
--------------------------------------
Commit d1e388dbca87 ("KVM: arm64: vgic-its: Avoid potential UAF in LPI
translation cache") fixed CVE-2024-26598 erroneously; correct it.
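The rule the fix restores, as a generic get/put pairing sketch (illustrative names, not the KVM API): the path whose lookup takes the reference is the path that must drop it.

    #include <stdatomic.h>
    #include <stdlib.h>

    struct irq_obj {
            atomic_int refcount;
    };

    static struct irq_obj *cache_lookup_get(struct irq_obj *irq)
    {
            atomic_fetch_add(&irq->refcount, 1);    /* lookup takes a ref */
            return irq;
    }

    static void irq_put(struct irq_obj *irq)
    {
            if (atomic_fetch_sub(&irq->refcount, 1) == 1)
                    free(irq);                      /* last ref frees */
    }

    static void inject_cached(struct irq_obj *cached)
    {
            struct irq_obj *irq = cache_lookup_get(cached);
            /* ... mark pending and queue the interrupt ... */
            irq_put(irq);   /* balances the get taken by the cache lookup */
    }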
Fixes: d1e388dbca87 ("KVM: arm64: vgic-its: Avoid potential UAF in LPI translation cache")
Signed-off-by: Jinjie Ruan <ruanjinjie(a)huawei.com>
---
arch/arm64/kvm/vgic/vgic-its.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 9c7bd5a8aa35..1868cc0f0fee 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -782,7 +782,6 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->pending_latch = true;
vgic_queue_irq_unlock(kvm, irq, flags);
- vgic_put_irq(kvm, irq);
return 0;
}
@@ -801,6 +800,7 @@ int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->pending_latch = true;
vgic_queue_irq_unlock(kvm, irq, flags);
+ vgic_put_irq(kvm, irq);
return 0;
}
--
2.34.1
*** BLURB HERE ***
Kunwu Chan (1):
mfd: syscon: Fix null pointer dereference in of_syscon_register()
drivers/mfd/syscon.c | 4 ++++
1 file changed, 4 insertions(+)
--
2.34.1

09 Apr '24
From: w00623716 <wushuai51(a)huawei.com>
The RDMA driver supports the following features:
Supports Huawei SP600 series NICs;
Supports RoCEv2;
Supports RoCE XRC, UD, UC, and RC modes;
Supports RoCE UC, RC, and UD local switching;
Supports RoCE MR, PD, CQ, QoS, QP, and SRQ management;
Supports RoCE congestion control;
Supports RoCE Bond;
Supports RoCE FLR;
Supports RoCE entry specifications;
Supports RoCE error detection and reporting;
w00623716 (2):
net/ethernet/huawei/hinic3: Add the CQM on which the RDMA depends
infiniband/hw/hiroce3: Add Huawei Intelligent Network Card RDMA Driver
arch/arm64/configs/openeuler_defconfig | 1 +
arch/x86/configs/openeuler_defconfig | 1 +
drivers/infiniband/Kconfig | 1 +
drivers/infiniband/hw/Makefile | 1 +
drivers/infiniband/hw/hiroce3/Kconfig | 14 +
drivers/infiniband/hw/hiroce3/Makefile | 157 +
.../infiniband/hw/hiroce3/bond/roce_bond.h | 149 +
.../hw/hiroce3/bond/roce_bond_common.c | 950 +
drivers/infiniband/hw/hiroce3/cq/roce_cq.h | 265 +
.../infiniband/hw/hiroce3/cq/roce_cq_common.c | 202 +
.../infiniband/hw/hiroce3/cq/roce_cq_cqe.c | 731 +
.../infiniband/hw/hiroce3/cq/roce_cq_create.c | 604 +
.../infiniband/hw/hiroce3/cq/roce_cq_ctrl.c | 872 +
.../hw/hiroce3/cq/roce_cq_destroy.c | 298 +
drivers/infiniband/hw/hiroce3/dfx/roce_dfx.c | 126 +
drivers/infiniband/hw/hiroce3/dfx/roce_dfx.h | 184 +
.../infiniband/hw/hiroce3/dfx/roce_dfx_cap.c | 676 +
.../infiniband/hw/hiroce3/dfx/roce_dfx_cap.h | 184 +
.../hw/hiroce3/dfx/roce_dfx_query.c | 615 +
.../hiroce3/extension/roce_cdev_extension.c | 18 +
.../hiroce3/extension/roce_event_extension.c | 30 +
.../hiroce3/extension/roce_main_extension.c | 206 +
.../hw/hiroce3/extension/roce_mr_extension.c | 38 +
.../hiroce3/extension/roce_netdev_extension.c | 174 +
.../hw/hiroce3/extension/roce_qp_extension.c | 251 +
.../extension/roce_qp_post_send_extension.c | 20 +
.../hw/hiroce3/extension/roce_srq_extension.c | 35 +
.../hw/hiroce3/host/crypt/hisec_cfg.c | 148 +
.../hw/hiroce3/host/crypt/hisec_cfg.h | 26 +
.../hw/hiroce3/host/crypt/hisec_hwrand.c | 229 +
.../hw/hiroce3/host/crypt/hisec_hwrand.h | 14 +
.../host/crypt/linux/kernel/hisec_crypt_dev.h | 29 +
.../crypt/linux/kernel/hisec_crypt_main.c | 123 +
.../hw/hiroce3/host/crypt/readme.txt | 1 +
.../hw/hiroce3/host/hmm/hmm_buddy.c | 170 +
.../hw/hiroce3/host/hmm/hmm_buddy.h | 36 +
.../infiniband/hw/hiroce3/host/hmm/hmm_comp.c | 165 +
.../infiniband/hw/hiroce3/host/hmm/hmm_comp.h | 225 +
.../hw/hiroce3/host/hmm/hmm_comp_init.c | 131 +
.../hw/hiroce3/host/hmm/hmm_comp_mtt.c | 497 +
.../hw/hiroce3/host/hmm/hmm_comp_mw_mr.c | 222 +
.../hw/hiroce3/host/hmm/hmm_comp_res.c | 63 +
.../infiniband/hw/hiroce3/host/hmm/hmm_em.c | 348 +
.../infiniband/hw/hiroce3/host/hmm/hmm_em.h | 48 +
.../infiniband/hw/hiroce3/host/hmm/hmm_mr.c | 403 +
.../infiniband/hw/hiroce3/host/hmm/hmm_mr.h | 34 +
.../infiniband/hw/hiroce3/host/hmm/hmm_umem.c | 322 +
.../infiniband/hw/hiroce3/host/hmm/hmm_umem.h | 124 +
.../hw/hiroce3/host/mt/hinic3_devlink.c | 428 +
.../hw/hiroce3/host/mt/hinic3_devlink.h | 173 +
.../hw/hiroce3/host/mt/hinic3_hw_mt.c | 605 +
.../hw/hiroce3/host/mt/hinic3_hw_mt.h | 49 +
.../hw/hiroce3/host/mt/hinic3_nictool.c | 982 +
.../hw/hiroce3/host/mt/hinic3_nictool.h | 35 +
.../infiniband/hw/hiroce3/host/mt/readme.txt | 1 +
.../hw/hiroce3/host/vram/vram_common.c | 192 +
.../hw/hiroce3/include/crypt/hisec_mpu_cmd.h | 38 +
.../include/crypt/hisec_mpu_cmd_defs.h | 118 +
.../hw/hiroce3/include/crypt/hisec_npu_cmd.h | 30 +
.../include/crypt/hisec_npu_cmd_defs.h | 415 +
.../hw/hiroce3/include/hinic3_hmm.h | 83 +
.../hw/hiroce3/include/hinic3_rdma.h | 203 +
.../hw/hiroce3/include/hinic3_srv_nic.h | 218 +
.../hw/hiroce3/include/hw/db_srv_type_pub.h | 126 +
.../hw/hiroce3/include/hw/node_id.h | 58 +
.../hw/register/infra/cnb_c_union_define.h | 318 +
.../hw/register/infra/cnb_reg_offset.h | 42 +
.../hw/register/infra/cpb_c_union_define.h | 8234 +
.../hw/register/infra/cpb_reg_offset.h | 1157 +
.../hw/register/infra/cpi_c_union_define.h | 23677 +++
.../register/infra/cpi_dfx_glb_reg_offset.h | 236 +
.../hw/register/infra/cpi_reg_offset.h | 4698 +
.../hw/register/infra/crypto_c_union_define.h | 6226 +
.../hw/register/infra/crypto_reg_offset.h | 1181 +
.../hw/register/infra/esch_c_union_define.h | 1340 +
.../hw/register/infra/esch_reg_offset.h | 802 +
.../infra/hi1823_csr_sm_addr_define.h | 384 +
.../hw/register/infra/hi1823_csr_sm_typedef.h | 12259 ++
.../register/infra/hinic3_csr_addr_common.h | 254 +
.../hw/register/infra/icdq_c_union_define.h | 1062 +
.../hw/register/infra/icdq_reg_offset.h | 114 +
.../hw/register/infra/ipsurx_c_union_define.h | 4805 +
.../hw/register/infra/ipsurx_reg_offset.h | 1469 +
.../hw/register/infra/ipsurx_typedef.h | 30688 ++++
.../hw/register/infra/ipsutx_c_union_define.h | 2872 +
.../hw/register/infra/ipsutx_reg_offset.h | 429 +
.../hw/register/infra/lcam_c_union_define.h | 741 +
.../hw/register/infra/lcam_reg_offset.h | 72 +
.../hw/register/infra/mqm_c_union_define.h | 11885 ++
.../hw/register/infra/mqm_reg_offset.h | 3696 +
.../hw/register/infra/oq_c_union_define.h | 2285 +
.../include/hw/register/infra/oq_reg_offset.h | 261 +
.../hw/register/infra/pe_c_union_define.h | 1819 +
.../include/hw/register/infra/pe_reg_offset.h | 493 +
.../hw/register/infra/pqm_c_union_define.h | 2091 +
.../hw/register/infra/pqm_reg_offset.h | 291 +
.../hw/register/infra/prmrx_c_union_define.h | 2937 +
.../hw/register/infra/prmrx_reg_offset.h | 258 +
.../hw/register/infra/prmtx_c_union_define.h | 2758 +
.../hw/register/infra/prmtx_reg_offset.h | 264 +
.../register/infra/ring_cnb_c_union_define.h | 314 +
.../hw/register/infra/ring_cnb_reg_offset.h | 52 +
.../hw/register/infra/sm_c_union_define.h | 6441 +
.../include/hw/register/infra/sm_reg_offset.h | 860 +
.../hw/register/infra/stffq_c_union_define.h | 6438 +
.../hw/register/infra/stffq_reg_offset.h | 1059 +
.../hw/register/infra/stfiq_c_union_define.h | 1413 +
.../hw/register/infra/stfiq_reg_offset.h | 148 +
.../register/infra/stfisch_c_union_define.h | 420 +
.../hw/register/infra/stfisch_reg_offset.h | 828 +
.../hw/register/infra/stlfq_c_union_define.h | 6358 +
.../hw/register/infra/stlfq_reg_offset.h | 535 +
.../hw/register/infra/stliq_c_union_define.h | 1109 +
.../hw/register/infra/stliq_reg_offset.h | 106 +
.../register/infra/stlisch_c_union_define.h | 425 +
.../hw/register/infra/stlisch_reg_offset.h | 527 +
.../hw/register/infra/tile_c_union_define.h | 3399 +
.../hw/register/infra/tile_reg_offset.h | 399 +
.../hw/register/infra/virtio_c_union_define.h | 4349 +
.../hw/register/infra/virtio_reg_offset.h | 275 +
.../hw/register/mag/c_union_define_mag_top.h | 1695 +
.../hw/register/mag/c_union_define_smag_cfg.h | 1457 +
.../mag/hi1822_csr_mag_offset_union_define.h | 22 +
.../hw/register/mag/mac_reg_an_lth60_offset.h | 483 +
.../hw/register/mag/mac_reg_com_offset.h | 33 +
.../hw/register/mag/mac_reg_mib_offset.h | 26 +
.../hw/register/mag/mac_reg_rx_brfec_offset.h | 68 +
.../hw/register/mag/mac_reg_rx_mac_offset.h | 95 +
.../hw/register/mag/mac_reg_rx_pcs_offset.h | 343 +
.../hw/register/mag/mac_reg_rx_rsfec_offset.h | 503 +
.../register/mag/mac_reg_rxpma_core_offset.h | 213 +
.../register/mag/mac_reg_rxpma_lane_offset.h | 44 +
.../hw/register/mag/mac_reg_tx_brfec_offset.h | 62 +
.../hw/register/mag/mac_reg_tx_mac_offset.h | 109 +
.../hw/register/mag/mac_reg_tx_pcs_offset.h | 169 +
.../hw/register/mag/mac_reg_tx_rsfec_offset.h | 75 +
.../register/mag/mac_reg_txpma_core_offset.h | 19 +
.../register/mag/mac_reg_txpma_lane_offset.h | 43 +
.../mag/mag_fc_sds_harden_reg_offset.h | 60 +
.../hw/register/mag/mag_top_reg_offset.h | 379 +
.../hw/register/mag/smag_cfg_reg_offset.h | 277 +
.../hw/register/misc/sfc_c_union_define.h | 721 +
.../include/hw/register/misc/sfc_reg_offset.h | 217 +
.../hw/register/mpu/c_union_define_crg.h | 625 +
.../include/hw/register/mpu/crg_reg_offset.h | 68 +
.../hw/register/mpu/mpu_c_union_define.h | 15330 ++
.../register/mpu/mpu_harden_c_union_define.h | 795 +
.../hw/register/mpu/mpu_harden_reg_offset.h | 461 +
.../include/hw/register/mpu/mpu_reg_offset.h | 2115 +
.../hw/register/pcie/hva_peh_c_union_define.h | 3236 +
.../hw/register/pcie/hva_peh_reg_offset.h | 214 +
.../hw/register/pcie/pcie5_ap_addr_define.h | 40529 +++++
.../hw/register/pcie/pcie5_ap_typedef.h | 42419 ++++++
.../hw/register/pcie/pcie5_core_addr_define.h | 3233 +
.../hw/register/pcie/pcie5_core_typedef.h | 118101 +++++++++++++++
.../hw/register/pcie/pcie5_pcs_addr_define.h | 286 +
.../hw/register/pcie/pcie5_pcs_typedef.h | 11995 ++
.../hw/register/rxtx/apb2ff_reg_offset.h | 68 +
.../hw/register/rxtx/c_union_define_apb2ff.h | 82 +
.../register/top/mag_harden_c_union_define.h | 484 +
.../hw/register/top/mag_harden_reg_offset.h | 55 +
.../register/top/pcie_harden_c_union_define.h | 738 +
.../hw/register/top/pcie_harden_reg_offset.h | 65 +
.../register/top/smf0_harden_c_union_define.h | 1611 +
.../hw/register/top/smf0_harden_reg_offset.h | 69 +
.../top/stlqu_harden_c_union_define.h | 662 +
.../hw/register/top/stlqu_harden_reg_offset.h | 69 +
.../register/top/top_cpb_harden_reg_offset.h | 42 +
.../top_encryp_decryp_harden_c_union_define.h | 48 +
.../top/top_encryp_decryp_harden_reg_offset.h | 73 +
.../hw/register/top/top_iocfg_reg_offset.h | 81 +
.../register/totem/arm_ras_c_union_define.h | 192 +
.../hw/register/totem/avs_reg_offset.h | 875 +
.../register/totem/ddrc_arm_ras_reg_offset.h | 31 +
.../hw/register/totem/ddrc_dmc_c_union_def.h | 102 +
.../hw/register/totem/ddrc_dmc_reg_offset.h | 314 +
.../register/totem/ddrc_rasc_c_union_define.h | 1411 +
.../hw/register/totem/ddrc_rasc_reg_offset.h | 192 +
.../hw/register/totem/efuse_reg_offset.h | 125 +
.../hw/register/totem/fabric_c_union_define.h | 1629 +
.../hw/register/totem/fabric_reg_offset.h | 102 +
.../hw/register/totem/hha_c_union_def.h | 62 +
.../hw/register/totem/hva_c_union_define.h | 1287 +
.../hw/register/totem/hva_reg_offset.h | 110 +
.../register/totem/hva_sm23_c_union_define.h | 43 +
.../hw/register/totem/hva_sm23_reg_offset.h | 37 +
.../register/totem/hva_smf_c_union_define.h | 1194 +
.../hw/register/totem/hva_smf_reg_offset.h | 189 +
.../hw/register/totem/mbigen_c_union_define.h | 643 +
.../hw/register/totem/mbigen_reg_offset.h | 956 +
.../hw/register/totem/pcie4_c_union_define.h | 5992 +
.../hw/register/totem/pcie4_reg_offset.h | 2240 +
.../register/totem/phosphor_c_union_define.h | 2841 +
.../hw/register/totem/phosphor_reg_offset.h | 149 +
.../include/hw/register/totem/plat_efuse.h | 368 +
.../hw/register/totem/ras_c_union_define.h | 295 +
.../hw/register/totem/ras_reg_offset.h | 110 +
.../hw/register/totem/sioe_reg_offset.h | 426 +
.../hw/register/totem/sllc_reg_offset.h | 150 +
.../hw/register/totem/smmu_c_union_define.h | 2448 +
.../hw/register/totem/smmu_reg_offset.h | 256 +
.../hw/register/totem/sysctrl_reg_offset.h | 316 +
.../hw/hiroce3/include/hw/service_type_pub.h | 135 +
.../hw/hiroce3/include/hw/smf_cache_type.h | 142 +
.../hw/hiroce3/include/hw/smf_instance_id.h | 168 +
.../hw/hiroce3/include/hw/sml_table.h | 4079 +
.../include/hw/sml_table_define_cloud.h | 273 +
.../include/hw/sml_table_define_compute_dpu.h | 12 +
.../hw/sml_table_define_compute_roce.h | 137 +
.../hw/sml_table_define_compute_standard.h | 234 +
.../include/hw/sml_table_define_fpga.h | 340 +
.../hiroce3/include/hw/sml_table_define_llt.h | 361 +
.../include/hw/sml_table_define_storage_fc.h | 65 +
.../hw/sml_table_define_storage_fc_adapt.h | 65 +
.../hw/sml_table_define_storage_roce.h | 149 +
.../hw/sml_table_define_storage_roceaa.h | 164 +
.../include/hw/sml_table_define_storage_toe.h | 113 +
.../hw/hiroce3/include/hw/sml_table_pub.h | 330 +
.../hw/hiroce3/include/hw/tile_spec.h | 28 +
.../hw/hiroce3/include/mag/mag_mpu_cmd.h | 77 +
.../hw/hiroce3/include/mag/mag_mpu_cmd_defs.h | 928 +
.../include/micro_log/hinic3_micro_log.c | 1156 +
.../include/micro_log/hinic3_micro_log.h | 169 +
.../hw/hiroce3/include/nic/nic_cfg_comm.h | 62 +
.../hw/hiroce3/include/nic/nic_mpu_cmd.h | 147 +
.../hw/hiroce3/include/nic/nic_mpu_cmd_defs.h | 1317 +
.../hw/hiroce3/include/nic/nic_npu_cmd.h | 31 +
.../hw/hiroce3/include/nic/nic_npu_cmd_defs.h | 140 +
.../hw/hiroce3/include/nic/nic_npu_wqe_defs.h | 240 +
.../include/public/npu_cmdq_base_defs.h | 241 +
.../include/rdma/rdma_context_format.h | 4435 +
.../include/rdma/rdma_ext_ctx_format.h | 382 +
.../hw/hiroce3/include/rdma/roce_ccf_format.h | 725 +
.../hiroce3/include/rdma/roce_compile_macro.h | 91 +
.../hw/hiroce3/include/rdma/roce_ctx_api.h | 265 +
.../hw/hiroce3/include/rdma/roce_dif_format.h | 459 +
.../hw/hiroce3/include/rdma/roce_err_type.h | 125 +
.../hiroce3/include/rdma/roce_hmm_context.h | 196 +
.../hw/hiroce3/include/rdma/roce_mpu_common.h | 241 +
.../include/rdma/roce_mpu_ulp_common.h | 56 +
.../hw/hiroce3/include/rdma/roce_pub.h | 262 +
.../hw/hiroce3/include/rdma/roce_pub_cmd.h | 264 +
.../hw/hiroce3/include/rdma/roce_ulp.h | 177 +
.../hw/hiroce3/include/rdma/roce_vbs_format.h | 208 +
.../hw/hiroce3/include/rdma/roce_verbs_attr.h | 400 +
.../include/rdma/roce_verbs_attr_qpc_chip.h | 355 +
.../hw/hiroce3/include/rdma/roce_verbs_cmd.h | 250 +
.../hiroce3/include/rdma/roce_verbs_cq_attr.h | 170 +
.../include/rdma/roce_verbs_ext_attr.h | 51 +
.../hiroce3/include/rdma/roce_verbs_format.h | 134 +
.../include/rdma/roce_verbs_gid_attr.h | 112 +
.../hiroce3/include/rdma/roce_verbs_mr_attr.h | 299 +
.../hw/hiroce3/include/rdma/roce_verbs_pub.h | 226 +
.../include/rdma/roce_verbs_srq_attr.h | 229 +
.../include/rdma/roce_verbs_ulp_format.h | 94 +
.../hw/hiroce3/include/rdma/roce_wqe_format.h | 825 +
.../hw/hiroce3/include/rdma/roce_xqe_format.h | 500 +
.../infiniband/hw/hiroce3/include/readme.txt | 1 +
.../hw/hiroce3/include/roce_cdev_extension.h | 20 +
.../hw/hiroce3/include/roce_event_extension.h | 20 +
.../hw/hiroce3/include/roce_main_extension.h | 84 +
.../hw/hiroce3/include/roce_mr_extension.h | 24 +
.../hiroce3/include/roce_netdev_extension.h | 26 +
.../hw/hiroce3/include/roce_qp_extension.h | 69 +
.../include/roce_qp_post_send_extension.h | 24 +
.../hw/hiroce3/include/roce_srq_extension.h | 20 +
drivers/infiniband/hw/hiroce3/mr/roce_mr.c | 1305 +
drivers/infiniband/hw/hiroce3/mr/roce_mr.h | 105 +
drivers/infiniband/hw/hiroce3/qp/roce_post.h | 198 +
drivers/infiniband/hw/hiroce3/qp/roce_qp.h | 255 +
.../infiniband/hw/hiroce3/qp/roce_qp_create.c | 1296 +
.../hw/hiroce3/qp/roce_qp_destroy.c | 289 +
.../infiniband/hw/hiroce3/qp/roce_qp_exp.h | 86 +
.../infiniband/hw/hiroce3/qp/roce_qp_modify.c | 2162 +
.../hw/hiroce3/qp/roce_qp_post_recv.c | 253 +
.../hw/hiroce3/qp/roce_qp_post_send.c | 1257 +
.../infiniband/hw/hiroce3/qp/roce_qp_query.c | 386 +
.../infiniband/hw/hiroce3/rdma/rdma_bitmap.c | 141 +
.../infiniband/hw/hiroce3/rdma/rdma_bitmap.h | 41 +
.../infiniband/hw/hiroce3/rdma/rdma_comp.c | 29 +
.../infiniband/hw/hiroce3/rdma/rdma_comp.h | 138 +
.../hw/hiroce3/rdma/rdma_comp_gid.c | 275 +
.../hw/hiroce3/rdma/rdma_comp_init.c | 387 +
.../hw/hiroce3/rdma/rdma_comp_mw_mr.c | 244 +
.../infiniband/hw/hiroce3/rdma/rdma_comp_pd.c | 57 +
.../hw/hiroce3/rdma/rdma_comp_res.c | 247 +
drivers/infiniband/hw/hiroce3/roce.h | 635 +
drivers/infiniband/hw/hiroce3/roce_cdev.c | 1218 +
drivers/infiniband/hw/hiroce3/roce_cmd.c | 753 +
drivers/infiniband/hw/hiroce3/roce_cmd.h | 78 +
drivers/infiniband/hw/hiroce3/roce_compat.h | 60 +
drivers/infiniband/hw/hiroce3/roce_cqm_cmd.c | 57 +
drivers/infiniband/hw/hiroce3/roce_cqm_cmd.h | 20 +
drivers/infiniband/hw/hiroce3/roce_db.c | 103 +
drivers/infiniband/hw/hiroce3/roce_db.h | 36 +
drivers/infiniband/hw/hiroce3/roce_event.c | 568 +
drivers/infiniband/hw/hiroce3/roce_event.h | 43 +
drivers/infiniband/hw/hiroce3/roce_k_ioctl.h | 96 +
drivers/infiniband/hw/hiroce3/roce_main.c | 1752 +
drivers/infiniband/hw/hiroce3/roce_mix.c | 1402 +
drivers/infiniband/hw/hiroce3/roce_mix.h | 210 +
drivers/infiniband/hw/hiroce3/roce_netdev.c | 858 +
drivers/infiniband/hw/hiroce3/roce_netdev.h | 65 +
drivers/infiniband/hw/hiroce3/roce_netlink.c | 368 +
drivers/infiniband/hw/hiroce3/roce_netlink.h | 171 +
drivers/infiniband/hw/hiroce3/roce_pd.c | 137 +
drivers/infiniband/hw/hiroce3/roce_pd.h | 31 +
drivers/infiniband/hw/hiroce3/roce_sysfs.c | 1787 +
drivers/infiniband/hw/hiroce3/roce_sysfs.h | 115 +
drivers/infiniband/hw/hiroce3/roce_user.h | 72 +
drivers/infiniband/hw/hiroce3/roce_xrc.c | 203 +
drivers/infiniband/hw/hiroce3/roce_xrc.h | 30 +
drivers/infiniband/hw/hiroce3/srq/roce_srq.h | 204 +
.../infiniband/hw/hiroce3/srq/roce_srq_comm.c | 97 +
.../hw/hiroce3/srq/roce_srq_create.c | 710 +
.../infiniband/hw/hiroce3/srq/roce_srq_ctrl.c | 634 +
drivers/net/ethernet/huawei/hinic3/Makefile | 26 +-
.../ethernet/huawei/hinic3/bond/hinic3_bond.c | 1054 +
.../ethernet/huawei/hinic3/bond/hinic3_bond.h | 96 +
.../ethernet/huawei/hinic3/comm_msg_intf.h | 565 +-
.../ethernet/huawei/hinic3/cqm/cqm_bat_cla.c | 2062 +
.../ethernet/huawei/hinic3/cqm/cqm_bat_cla.h | 215 +
.../huawei/hinic3/cqm/cqm_bitmap_table.c | 1461 +
.../huawei/hinic3/cqm/cqm_bitmap_table.h | 67 +
.../huawei/hinic3/cqm/cqm_bloomfilter.c | 521 +
.../huawei/hinic3/cqm/cqm_bloomfilter.h | 53 +
.../net/ethernet/huawei/hinic3/cqm/cqm_cmd.c | 202 +
.../net/ethernet/huawei/hinic3/cqm/cqm_cmd.h | 39 +
.../net/ethernet/huawei/hinic3/cqm/cqm_db.c | 479 +
.../net/ethernet/huawei/hinic3/cqm/cqm_db.h | 36 +
.../ethernet/huawei/hinic3/cqm/cqm_define.h | 54 +
.../net/ethernet/huawei/hinic3/cqm/cqm_main.c | 1685 +
.../net/ethernet/huawei/hinic3/cqm/cqm_main.h | 426 +
.../ethernet/huawei/hinic3/cqm/cqm_memsec.c | 674 +
.../ethernet/huawei/hinic3/cqm/cqm_memsec.h | 23 +
.../ethernet/huawei/hinic3/cqm/cqm_object.c | 1539 +
.../ethernet/huawei/hinic3/cqm/cqm_object.h | 715 +
.../huawei/hinic3/cqm/cqm_object_intern.c | 1459 +
.../huawei/hinic3/cqm/cqm_object_intern.h | 93 +
.../net/ethernet/huawei/hinic3/cqm/readme.txt | 3 +
.../huawei/hinic3/hw/hinic3_api_cmd.h | 42 +-
.../ethernet/huawei/hinic3/hw/hinic3_cmdq.c | 63 +-
.../ethernet/huawei/hinic3/hw/hinic3_cmdq.h | 38 +-
.../ethernet/huawei/hinic3/hw/hinic3_csr.h | 1 +
.../huawei/hinic3/hw/hinic3_devlink.c | 46 +-
.../huawei/hinic3/hw/hinic3_devlink.h | 24 +
.../ethernet/huawei/hinic3/hw/hinic3_eqs.c | 57 +-
.../ethernet/huawei/hinic3/hw/hinic3_eqs.h | 4 +-
.../ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c | 153 +-
.../ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h | 15 +-
.../huawei/hinic3/hw/hinic3_hw_comm.c | 152 +-
.../huawei/hinic3/hw/hinic3_hw_comm.h | 2 +-
.../ethernet/huawei/hinic3/hw/hinic3_hw_mt.c | 43 +-
.../ethernet/huawei/hinic3/hw/hinic3_hwdev.c | 232 +-
.../ethernet/huawei/hinic3/hw/hinic3_hwdev.h | 35 +-
.../ethernet/huawei/hinic3/hw/hinic3_hwif.c | 54 +
.../ethernet/huawei/hinic3/hw/hinic3_mbox.c | 122 +-
.../ethernet/huawei/hinic3/hw/hinic3_mbox.h | 82 +-
.../ethernet/huawei/hinic3/hw/hinic3_mgmt.c | 83 +-
.../ethernet/huawei/hinic3/hw/hinic3_mgmt.h | 27 +-
.../huawei/hinic3/hw/hinic3_multi_host_mgmt.c | 1226 +
.../huawei/hinic3/hw/hinic3_multi_host_mgmt.h | 124 +
.../huawei/hinic3/hw/hinic3_pci_id_tbl.h | 36 +-
.../huawei/hinic3/hw/ossl_knl_linux.c | 210 +-
.../net/ethernet/huawei/hinic3/hw/readme.txt | 10 +
.../hinic3/include/bond/bond_common_defs.h | 115 +
.../include/cfg_mgmt/cfg_mgmt_mpu_cmd.h | 15 +
.../include/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h | 215 +
.../huawei/hinic3/include/cqm/cqm_npu_cmd.h | 25 +
.../hinic3/include/cqm/cqm_npu_cmd_defs.h | 65 +
.../huawei/hinic3/include/hinic3_common.h | 203 +
.../huawei/hinic3/include/hinic3_cqm.h | 848 +
.../huawei/hinic3/include/hinic3_cqm_define.h | 52 +
.../huawei/hinic3/{ => include}/hinic3_crm.h | 119 +-
.../huawei/hinic3/{ => include}/hinic3_hw.h | 74 +-
.../huawei/hinic3/include/hinic3_lld.h | 225 +
.../huawei/hinic3/include/hinic3_mt.h | 826 +
.../huawei/hinic3/include/hinic3_profile.h | 148 +
.../huawei/hinic3/include/hinic3_wq.h | 162 +
.../ethernet/huawei/hinic3/include/hiudk3.h | 81 +
.../huawei/hinic3/include/hiudk3_common.h | 38 +
.../huawei/hinic3/include/hiudk3_cqm.h | 786 +
.../huawei/hinic3/include/hiudk3_crm.h | 489 +
.../huawei/hinic3/include/hiudk3_crm_adapt.h | 646 +
.../huawei/hinic3/include/hiudk3_hw.h | 665 +
.../huawei/hinic3/include/hiudk3_lld.h | 196 +
.../huawei/hinic3/include/hiudk3_wq.h | 122 +
.../huawei/hinic3/include/hiudk_adpt.h | 213 +
.../hinic3/include/hiudk_sdk_cqm_adpt.h | 460 +
.../huawei/hinic3/include/hiudk_sdk_crm.h | 335 +
.../hinic3/include/hiudk_sdk_crm_adpt.h | 726 +
.../huawei/hinic3/include/hiudk_sdk_hw_adpt.h | 337 +
.../hinic3/include/hiudk_sdk_hw_comm_adpt.h | 412 +
.../hinic3/include/mpu/mpu_board_defs.h | 75 +
.../hinic3/include/mpu/mpu_cmd_base_defs.h | 122 +
.../hinic3/include/mpu/mpu_inband_cmd.h | 184 +
.../hinic3/include/mpu/mpu_inband_cmd_defs.h | 1150 +
.../hinic3/include/mpu/mpu_outband_mctp_cmd.h | 35 +
.../include/mpu/mpu_outband_mctp_cmd_defs.h | 162 +
.../hinic3/include/mpu/mpu_outband_ncsi_cmd.h | 196 +
.../include/mpu/mpu_outband_ncsi_cmd_defs.h | 1683 +
.../include/mpu/mpu_outband_smbus_cmd.h | 56 +
.../include/mpu/mpu_outband_smbus_cmd_defs.h | 289 +
.../huawei/hinic3/include/ossl_ctype_ex.h | 202 +
.../huawei/hinic3/{ => include}/ossl_knl.h | 5 +
.../huawei/hinic3/include/ossl_types.h | 173 +
.../huawei/hinic3/include/ossl_user.h | 91 +
.../include/public/npu_cmdq_base_defs.h | 241 +
.../ethernet/huawei/hinic3/include/readme.txt | 1 +
.../hinic3/include/vmsec/vmsec_mpu_common.h | 139 +
.../huawei/hinic3/include/vram_common.h | 91 +
.../ethernet/huawei/hinic3/ossl_knl_linux.h | 1117 +-
412 files changed, 532031 insertions(+), 1013 deletions(-)
create mode 100644 drivers/infiniband/hw/hiroce3/Kconfig
create mode 100755 drivers/infiniband/hw/hiroce3/Makefile
create mode 100644 drivers/infiniband/hw/hiroce3/bond/roce_bond.h
create mode 100644 drivers/infiniband/hw/hiroce3/bond/roce_bond_common.c
create mode 100644 drivers/infiniband/hw/hiroce3/cq/roce_cq.h
create mode 100644 drivers/infiniband/hw/hiroce3/cq/roce_cq_common.c
create mode 100644 drivers/infiniband/hw/hiroce3/cq/roce_cq_cqe.c
create mode 100644 drivers/infiniband/hw/hiroce3/cq/roce_cq_create.c
create mode 100644 drivers/infiniband/hw/hiroce3/cq/roce_cq_ctrl.c
create mode 100644 drivers/infiniband/hw/hiroce3/cq/roce_cq_destroy.c
create mode 100644 drivers/infiniband/hw/hiroce3/dfx/roce_dfx.c
create mode 100644 drivers/infiniband/hw/hiroce3/dfx/roce_dfx.h
create mode 100644 drivers/infiniband/hw/hiroce3/dfx/roce_dfx_cap.c
create mode 100644 drivers/infiniband/hw/hiroce3/dfx/roce_dfx_cap.h
create mode 100644 drivers/infiniband/hw/hiroce3/dfx/roce_dfx_query.c
create mode 100644 drivers/infiniband/hw/hiroce3/extension/roce_cdev_extension.c
create mode 100644 drivers/infiniband/hw/hiroce3/extension/roce_event_extension.c
create mode 100644 drivers/infiniband/hw/hiroce3/extension/roce_main_extension.c
create mode 100644 drivers/infiniband/hw/hiroce3/extension/roce_mr_extension.c
create mode 100644 drivers/infiniband/hw/hiroce3/extension/roce_netdev_extension.c
create mode 100755 drivers/infiniband/hw/hiroce3/extension/roce_qp_extension.c
create mode 100644 drivers/infiniband/hw/hiroce3/extension/roce_qp_post_send_extension.c
create mode 100644 drivers/infiniband/hw/hiroce3/extension/roce_srq_extension.c
create mode 100644 drivers/infiniband/hw/hiroce3/host/crypt/hisec_cfg.c
create mode 100644 drivers/infiniband/hw/hiroce3/host/crypt/hisec_cfg.h
create mode 100644 drivers/infiniband/hw/hiroce3/host/crypt/hisec_hwrand.c
create mode 100644 drivers/infiniband/hw/hiroce3/host/crypt/hisec_hwrand.h
create mode 100644 drivers/infiniband/hw/hiroce3/host/crypt/linux/kernel/hisec_crypt_dev.h
create mode 100644 drivers/infiniband/hw/hiroce3/host/crypt/linux/kernel/hisec_crypt_main.c
create mode 100644 drivers/infiniband/hw/hiroce3/host/crypt/readme.txt
create mode 100644 drivers/infiniband/hw/hiroce3/host/hmm/hmm_buddy.c
create mode 100644 drivers/infiniband/hw/hiroce3/host/hmm/hmm_buddy.h
create mode 100644 drivers/infiniband/hw/hiroce3/host/hmm/hmm_comp.c
create mode 100644 drivers/infiniband/hw/hiroce3/host/hmm/hmm_comp.h
create mode 100644 drivers/infiniband/hw/hiroce3/host/hmm/hmm_comp_init.c
create mode 100644 drivers/infiniband/hw/hiroce3/host/hmm/hmm_comp_mtt.c
create mode 100644 drivers/infiniband/hw/hiroce3/host/hmm/hmm_comp_mw_mr.c
create mode 100644 drivers/infiniband/hw/hiroce3/host/hmm/hmm_comp_res.c
create mode 100644 drivers/infiniband/hw/hiroce3/host/hmm/hmm_em.c
create mode 100644 drivers/infiniband/hw/hiroce3/host/hmm/hmm_em.h
create mode 100644 drivers/infiniband/hw/hiroce3/host/hmm/hmm_mr.c
create mode 100644 drivers/infiniband/hw/hiroce3/host/hmm/hmm_mr.h
create mode 100644 drivers/infiniband/hw/hiroce3/host/hmm/hmm_umem.c
create mode 100644 drivers/infiniband/hw/hiroce3/host/hmm/hmm_umem.h
create mode 100644 drivers/infiniband/hw/hiroce3/host/mt/hinic3_devlink.c
create mode 100644 drivers/infiniband/hw/hiroce3/host/mt/hinic3_devlink.h
create mode 100644 drivers/infiniband/hw/hiroce3/host/mt/hinic3_hw_mt.c
create mode 100644 drivers/infiniband/hw/hiroce3/host/mt/hinic3_hw_mt.h
create mode 100644 drivers/infiniband/hw/hiroce3/host/mt/hinic3_nictool.c
create mode 100644 drivers/infiniband/hw/hiroce3/host/mt/hinic3_nictool.h
create mode 100644 drivers/infiniband/hw/hiroce3/host/mt/readme.txt
create mode 100644 drivers/infiniband/hw/hiroce3/host/vram/vram_common.c
create mode 100644 drivers/infiniband/hw/hiroce3/include/crypt/hisec_mpu_cmd.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/crypt/hisec_mpu_cmd_defs.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/crypt/hisec_npu_cmd.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/crypt/hisec_npu_cmd_defs.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hinic3_hmm.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hinic3_rdma.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hinic3_srv_nic.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/db_srv_type_pub.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/node_id.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/cnb_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/cnb_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/cpb_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/cpb_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/cpi_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/cpi_dfx_glb_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/cpi_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/crypto_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/crypto_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/esch_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/esch_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/hi1823_csr_sm_addr_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/hi1823_csr_sm_typedef.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/hinic3_csr_addr_common.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/icdq_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/icdq_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/ipsurx_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/ipsurx_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/ipsurx_typedef.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/ipsutx_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/ipsutx_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/lcam_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/lcam_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/mqm_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/mqm_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/oq_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/oq_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/pe_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/pe_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/pqm_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/pqm_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/prmrx_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/prmrx_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/prmtx_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/prmtx_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/ring_cnb_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/ring_cnb_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/sm_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/sm_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/stffq_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/stffq_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/stfiq_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/stfiq_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/stfisch_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/stfisch_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/stlfq_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/stlfq_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/stliq_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/stliq_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/stlisch_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/stlisch_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/tile_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/tile_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/virtio_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/infra/virtio_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mag/c_union_define_mag_top.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mag/c_union_define_smag_cfg.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mag/hi1822_csr_mag_offset_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mag/mac_reg_an_lth60_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mag/mac_reg_com_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mag/mac_reg_mib_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mag/mac_reg_rx_brfec_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mag/mac_reg_rx_mac_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mag/mac_reg_rx_pcs_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mag/mac_reg_rx_rsfec_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mag/mac_reg_rxpma_core_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mag/mac_reg_rxpma_lane_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mag/mac_reg_tx_brfec_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mag/mac_reg_tx_mac_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mag/mac_reg_tx_pcs_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mag/mac_reg_tx_rsfec_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mag/mac_reg_txpma_core_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mag/mac_reg_txpma_lane_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mag/mag_fc_sds_harden_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mag/mag_top_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mag/smag_cfg_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/misc/sfc_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/misc/sfc_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mpu/c_union_define_crg.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mpu/crg_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mpu/mpu_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mpu/mpu_harden_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mpu/mpu_harden_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/mpu/mpu_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/pcie/hva_peh_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/pcie/hva_peh_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/pcie/pcie5_ap_addr_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/pcie/pcie5_ap_typedef.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/pcie/pcie5_core_addr_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/pcie/pcie5_core_typedef.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/pcie/pcie5_pcs_addr_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/pcie/pcie5_pcs_typedef.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/rxtx/apb2ff_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/rxtx/c_union_define_apb2ff.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/top/mag_harden_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/top/mag_harden_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/top/pcie_harden_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/top/pcie_harden_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/top/smf0_harden_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/top/smf0_harden_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/top/stlqu_harden_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/top/stlqu_harden_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/top/top_cpb_harden_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/top/top_encryp_decryp_harden_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/top/top_encryp_decryp_harden_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/top/top_iocfg_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/arm_ras_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/avs_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/ddrc_arm_ras_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/ddrc_dmc_c_union_def.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/ddrc_dmc_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/ddrc_rasc_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/ddrc_rasc_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/efuse_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/fabric_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/fabric_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/hha_c_union_def.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/hva_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/hva_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/hva_sm23_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/hva_sm23_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/hva_smf_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/hva_smf_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/mbigen_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/mbigen_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/pcie4_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/pcie4_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/phosphor_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/phosphor_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/plat_efuse.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/ras_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/ras_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/sioe_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/sllc_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/smmu_c_union_define.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/smmu_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/register/totem/sysctrl_reg_offset.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/service_type_pub.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/smf_cache_type.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/smf_instance_id.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/sml_table.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/sml_table_define_cloud.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/sml_table_define_compute_dpu.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/sml_table_define_compute_roce.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/sml_table_define_compute_standard.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/sml_table_define_fpga.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/sml_table_define_llt.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/sml_table_define_storage_fc.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/sml_table_define_storage_fc_adapt.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/sml_table_define_storage_roce.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/sml_table_define_storage_roceaa.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/sml_table_define_storage_toe.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/sml_table_pub.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/hw/tile_spec.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/mag/mag_mpu_cmd.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/mag/mag_mpu_cmd_defs.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/micro_log/hinic3_micro_log.c
create mode 100644 drivers/infiniband/hw/hiroce3/include/micro_log/hinic3_micro_log.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/nic/nic_cfg_comm.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/nic/nic_mpu_cmd.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/nic/nic_mpu_cmd_defs.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/nic/nic_npu_cmd.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/nic/nic_npu_cmd_defs.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/nic/nic_npu_wqe_defs.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/public/npu_cmdq_base_defs.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/rdma_context_format.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/rdma_ext_ctx_format.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_ccf_format.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_compile_macro.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_ctx_api.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_dif_format.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_err_type.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_hmm_context.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_mpu_common.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_mpu_ulp_common.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_pub.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_pub_cmd.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_ulp.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_vbs_format.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_verbs_attr.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_verbs_attr_qpc_chip.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_verbs_cmd.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_verbs_cq_attr.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_verbs_ext_attr.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_verbs_format.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_verbs_gid_attr.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_verbs_mr_attr.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_verbs_pub.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_verbs_srq_attr.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_verbs_ulp_format.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_wqe_format.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/rdma/roce_xqe_format.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/readme.txt
create mode 100644 drivers/infiniband/hw/hiroce3/include/roce_cdev_extension.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/roce_event_extension.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/roce_main_extension.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/roce_mr_extension.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/roce_netdev_extension.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/roce_qp_extension.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/roce_qp_post_send_extension.h
create mode 100644 drivers/infiniband/hw/hiroce3/include/roce_srq_extension.h
create mode 100755 drivers/infiniband/hw/hiroce3/mr/roce_mr.c
create mode 100644 drivers/infiniband/hw/hiroce3/mr/roce_mr.h
create mode 100644 drivers/infiniband/hw/hiroce3/qp/roce_post.h
create mode 100644 drivers/infiniband/hw/hiroce3/qp/roce_qp.h
create mode 100755 drivers/infiniband/hw/hiroce3/qp/roce_qp_create.c
create mode 100644 drivers/infiniband/hw/hiroce3/qp/roce_qp_destroy.c
create mode 100644 drivers/infiniband/hw/hiroce3/qp/roce_qp_exp.h
create mode 100755 drivers/infiniband/hw/hiroce3/qp/roce_qp_modify.c
create mode 100644 drivers/infiniband/hw/hiroce3/qp/roce_qp_post_recv.c
create mode 100644 drivers/infiniband/hw/hiroce3/qp/roce_qp_post_send.c
create mode 100644 drivers/infiniband/hw/hiroce3/qp/roce_qp_query.c
create mode 100644 drivers/infiniband/hw/hiroce3/rdma/rdma_bitmap.c
create mode 100644 drivers/infiniband/hw/hiroce3/rdma/rdma_bitmap.h
create mode 100644 drivers/infiniband/hw/hiroce3/rdma/rdma_comp.c
create mode 100644 drivers/infiniband/hw/hiroce3/rdma/rdma_comp.h
create mode 100644 drivers/infiniband/hw/hiroce3/rdma/rdma_comp_gid.c
create mode 100644 drivers/infiniband/hw/hiroce3/rdma/rdma_comp_init.c
create mode 100644 drivers/infiniband/hw/hiroce3/rdma/rdma_comp_mw_mr.c
create mode 100644 drivers/infiniband/hw/hiroce3/rdma/rdma_comp_pd.c
create mode 100644 drivers/infiniband/hw/hiroce3/rdma/rdma_comp_res.c
create mode 100755 drivers/infiniband/hw/hiroce3/roce.h
create mode 100755 drivers/infiniband/hw/hiroce3/roce_cdev.c
create mode 100755 drivers/infiniband/hw/hiroce3/roce_cmd.c
create mode 100755 drivers/infiniband/hw/hiroce3/roce_cmd.h
create mode 100644 drivers/infiniband/hw/hiroce3/roce_compat.h
create mode 100644 drivers/infiniband/hw/hiroce3/roce_cqm_cmd.c
create mode 100644 drivers/infiniband/hw/hiroce3/roce_cqm_cmd.h
create mode 100755 drivers/infiniband/hw/hiroce3/roce_db.c
create mode 100644 drivers/infiniband/hw/hiroce3/roce_db.h
create mode 100644 drivers/infiniband/hw/hiroce3/roce_event.c
create mode 100644 drivers/infiniband/hw/hiroce3/roce_event.h
create mode 100755 drivers/infiniband/hw/hiroce3/roce_k_ioctl.h
create mode 100755 drivers/infiniband/hw/hiroce3/roce_main.c
create mode 100755 drivers/infiniband/hw/hiroce3/roce_mix.c
create mode 100755 drivers/infiniband/hw/hiroce3/roce_mix.h
create mode 100644 drivers/infiniband/hw/hiroce3/roce_netdev.c
create mode 100644 drivers/infiniband/hw/hiroce3/roce_netdev.h
create mode 100755 drivers/infiniband/hw/hiroce3/roce_netlink.c
create mode 100644 drivers/infiniband/hw/hiroce3/roce_netlink.h
create mode 100644 drivers/infiniband/hw/hiroce3/roce_pd.c
create mode 100644 drivers/infiniband/hw/hiroce3/roce_pd.h
create mode 100755 drivers/infiniband/hw/hiroce3/roce_sysfs.c
create mode 100644 drivers/infiniband/hw/hiroce3/roce_sysfs.h
create mode 100644 drivers/infiniband/hw/hiroce3/roce_user.h
create mode 100644 drivers/infiniband/hw/hiroce3/roce_xrc.c
create mode 100644 drivers/infiniband/hw/hiroce3/roce_xrc.h
create mode 100644 drivers/infiniband/hw/hiroce3/srq/roce_srq.h
create mode 100644 drivers/infiniband/hw/hiroce3/srq/roce_srq_comm.c
create mode 100644 drivers/infiniband/hw/hiroce3/srq/roce_srq_create.c
create mode 100644 drivers/infiniband/hw/hiroce3/srq/roce_srq_ctrl.c
mode change 100644 => 100755 drivers/net/ethernet/huawei/hinic3/Makefile
create mode 100755 drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.c
create mode 100755 drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.h
mode change 100644 => 100755 drivers/net/ethernet/huawei/hinic3/comm_msg_intf.h
create mode 100755 drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.c
create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.h
create mode 100755 drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.c
create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_bloomfilter.c
create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_bloomfilter.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_cmd.c
create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_cmd.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_db.c
create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_db.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_define.h
create mode 100755 drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.c
create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.h
create mode 100755 drivers/net/ethernet/huawei/hinic3/cqm/cqm_memsec.c
create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_memsec.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_object.c
create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_object.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_object_intern.c
create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_object_intern.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/readme.txt
mode change 100644 => 100755 drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.c
mode change 100644 => 100755 drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.h
mode change 100644 => 100755 drivers/net/ethernet/huawei/hinic3/hw/hinic3_devlink.c
mode change 100644 => 100755 drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.c
mode change 100644 => 100755 drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c
mode change 100644 => 100755 drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c
mode change 100644 => 100755 drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.c
create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_multi_host_mgmt.c
create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_multi_host_mgmt.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/readme.txt
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/bond/bond_common_defs.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/cfg_mgmt_mpu_cmd.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/cqm/cqm_npu_cmd.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/cqm/cqm_npu_cmd_defs.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hinic3_common.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hinic3_cqm.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hinic3_cqm_define.h
rename drivers/net/ethernet/huawei/hinic3/{ => include}/hinic3_crm.h (90%)
mode change 100644 => 100755
rename drivers/net/ethernet/huawei/hinic3/{ => include}/hinic3_hw.h (94%)
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hinic3_lld.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hinic3_mt.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hinic3_profile.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hinic3_wq.h
create mode 100755 drivers/net/ethernet/huawei/hinic3/include/hiudk3.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hiudk3_common.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hiudk3_cqm.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hiudk3_crm.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hiudk3_crm_adapt.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hiudk3_hw.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hiudk3_lld.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hiudk3_wq.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hiudk_adpt.h
create mode 100755 drivers/net/ethernet/huawei/hinic3/include/hiudk_sdk_cqm_adpt.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hiudk_sdk_crm.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hiudk_sdk_crm_adpt.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hiudk_sdk_hw_adpt.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hiudk_sdk_hw_comm_adpt.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_board_defs.h
create mode 100755 drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_cmd_base_defs.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd_defs.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_outband_mctp_cmd.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_outband_mctp_cmd_defs.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_outband_ncsi_cmd.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_outband_ncsi_cmd_defs.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_outband_smbus_cmd.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_outband_smbus_cmd_defs.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/ossl_ctype_ex.h
rename drivers/net/ethernet/huawei/hinic3/{ => include}/ossl_knl.h (93%)
mode change 100644 => 100755
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/ossl_types.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/ossl_user.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/public/npu_cmdq_base_defs.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/readme.txt
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/vmsec/vmsec_mpu_common.h
create mode 100644 drivers/net/ethernet/huawei/hinic3/include/vram_common.h
mode change 100644 => 100755 drivers/net/ethernet/huawei/hinic3/ossl_knl_linux.h
--
2.33.0
From: Fullway Wang <fullwaywang(a)outlook.com>
stable inclusion
from stable-v5.10.211
commit cd36da760bd1f78c63c7078407baf01dd724f313
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I9E2MG
CVE: CVE-2024-26777
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit e421946be7d9bf545147bea8419ef8239cb7ca52 ]
The userspace program could pass any values to the driver through the
ioctl() interface. If the driver doesn't check the value of pixclock,
it may cause a divide-by-zero error.
In sisfb_check_var(), var->pixclock is used as a divisor to calculate
drate before it is checked against zero. Fix this by checking it
at the beginning.
This is similar to CVE-2022-3061 in i740fb which was fixed by
commit 15cf0b8.
Signed-off-by: Fullway Wang <fullwaywang(a)outlook.com>
Signed-off-by: Helge Deller <deller(a)gmx.de>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Jinjiang Tu <tujinjiang(a)huawei.com>
---
drivers/video/fbdev/sis/sis_main.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index 03c736f6f3d0..e540cb0c5172 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -1474,6 +1474,8 @@ sisfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
vtotal = var->upper_margin + var->lower_margin + var->vsync_len;
+ if (!var->pixclock)
+ return -EINVAL;
pixclock = var->pixclock;
if((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) {
--
2.25.1
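For context, here is a minimal userspace sketch of the kind of ioctl() call that could hit the unchecked divisor before this fix. It is an illustration only; /dev/fb0 is just an assumed device node:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fb.h>

int main(void)
{
	struct fb_var_screeninfo var;
	int fd = open("/dev/fb0", O_RDWR);	/* assumed fbdev node */

	if (fd < 0 || ioctl(fd, FBIOGET_VSCREENINFO, &var))
		return 1;
	var.pixclock = 0;	/* arbitrary userspace-controlled value */
	/* before the fix, sisfb_check_var() divided by pixclock;
	 * with the fix this now fails early with -EINVAL */
	ioctl(fd, FBIOPUT_VSCREENINFO, &var);
	return 0;
}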
From: Fullway Wang <fullwaywang(a)outlook.com>
stable inclusion
from stable-v4.19.308
commit 84246c35ca34207114055a87552a1c4289c8fd7e
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I9E2MG
CVE: CVE-2024-26777
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
[ Upstream commit e421946be7d9bf545147bea8419ef8239cb7ca52 ]
The userspace program could pass any values to the driver through the
ioctl() interface. If the driver doesn't check the value of pixclock,
it may cause a divide-by-zero error.
In sisfb_check_var(), var->pixclock is used as a divisor to calculate
drate before it is checked against zero. Fix this by checking it
at the beginning.
This is similar to CVE-2022-3061 in i740fb which was fixed by
commit 15cf0b8.
Signed-off-by: Fullway Wang <fullwaywang(a)outlook.com>
Signed-off-by: Helge Deller <deller(a)gmx.de>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Jinjiang Tu <tujinjiang(a)huawei.com>
---
drivers/video/fbdev/sis/sis_main.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index 20aff9005978..b7f9da690db2 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -1488,6 +1488,8 @@ sisfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
vtotal = var->upper_margin + var->lower_margin + var->vsync_len;
+ if (!var->pixclock)
+ return -EINVAL;
pixclock = var->pixclock;
if((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) {
--
2.25.1
CVE-2021-47144
Jingwen Chen (1):
drm/amd/amdgpu: fix refcount leak
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 3 +++
1 file changed, 3 insertions(+)
--
2.25.1
[openeuler:openEuler-1.0-LTS 21385/22056] drivers/tee/optee/core.c:618:31: sparse: sparse: incorrect type in return expression (different base types)
by kernel test robot 09 Apr '24
Hi Ard,
First bad commit (maybe != root cause):
tree: https://gitee.com/openeuler/kernel.git openEuler-1.0-LTS
head: 11b92ddca3376d425d16b81adebee1e8033e3df0
commit: 595311cf7e49c781dd26782e4b45fd54bbfb3e40 [21385/22056] optee: model OP-TEE as a platform device/driver
config: arm64-randconfig-r111-20240331 (https://download.01.org/0day-ci/archive/20240409/202404091014.UzbOQtd8-lkp@…)
compiler: aarch64-linux-gcc (GCC) 13.2.0
reproduce: (https://download.01.org/0day-ci/archive/20240409/202404091014.UzbOQtd8-lkp@…)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp(a)intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202404091014.UzbOQtd8-lkp@intel.com/
sparse warnings: (new ones prefixed by >>)
>> drivers/tee/optee/core.c:618:31: sparse: sparse: incorrect type in return expression (different base types) @@ expected int @@ got void * @@
drivers/tee/optee/core.c:618:31: sparse: expected int
drivers/tee/optee/core.c:618:31: sparse: got void *
drivers/tee/optee/core.c: In function 'optee_probe':
drivers/tee/optee/core.c:618:24: warning: returning 'void *' from a function with return type 'int' makes integer from pointer without a cast [-Wint-conversion]
618 | return ERR_PTR(-EINVAL);
| ^~~~~~~~~~~~~~~~
vim +618 drivers/tee/optee/core.c
595311cf7e49c7 Ard Biesheuvel 2019-12-09 581
595311cf7e49c7 Ard Biesheuvel 2019-12-09 582 static int optee_probe(struct platform_device *pdev)
4fb0a5eb364d23 Jens Wiklander 2015-04-14 583 {
4fb0a5eb364d23 Jens Wiklander 2015-04-14 584 optee_invoke_fn *invoke_fn;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 585 struct tee_shm_pool *pool;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 586 struct optee *optee = NULL;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 587 void *memremaped_shm = NULL;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 588 struct tee_device *teedev;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 589 u32 sec_caps;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 590 int rc;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 591
595311cf7e49c7 Ard Biesheuvel 2019-12-09 592 invoke_fn = get_invoke_func(&pdev->dev);
4fb0a5eb364d23 Jens Wiklander 2015-04-14 593 if (IS_ERR(invoke_fn))
595311cf7e49c7 Ard Biesheuvel 2019-12-09 594 return PTR_ERR(invoke_fn);
4fb0a5eb364d23 Jens Wiklander 2015-04-14 595
4fb0a5eb364d23 Jens Wiklander 2015-04-14 596 if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
4fb0a5eb364d23 Jens Wiklander 2015-04-14 597 pr_warn("api uid mismatch\n");
595311cf7e49c7 Ard Biesheuvel 2019-12-09 598 return -EINVAL;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 599 }
4fb0a5eb364d23 Jens Wiklander 2015-04-14 600
5c5f80307ab27c Jérôme Forissier 2017-11-24 601 optee_msg_get_os_revision(invoke_fn);
5c5f80307ab27c Jérôme Forissier 2017-11-24 602
4fb0a5eb364d23 Jens Wiklander 2015-04-14 603 if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
4fb0a5eb364d23 Jens Wiklander 2015-04-14 604 pr_warn("api revision mismatch\n");
595311cf7e49c7 Ard Biesheuvel 2019-12-09 605 return -EINVAL;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 606 }
4fb0a5eb364d23 Jens Wiklander 2015-04-14 607
4fb0a5eb364d23 Jens Wiklander 2015-04-14 608 if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
4fb0a5eb364d23 Jens Wiklander 2015-04-14 609 pr_warn("capabilities mismatch\n");
595311cf7e49c7 Ard Biesheuvel 2019-12-09 610 return -EINVAL;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 611 }
4fb0a5eb364d23 Jens Wiklander 2015-04-14 612
4fb0a5eb364d23 Jens Wiklander 2015-04-14 613 /*
4fb0a5eb364d23 Jens Wiklander 2015-04-14 614 * We have no other option for shared memory, if secure world
4fb0a5eb364d23 Jens Wiklander 2015-04-14 615 * doesn't have any reserved memory we can use we can't continue.
4fb0a5eb364d23 Jens Wiklander 2015-04-14 616 */
4fb0a5eb364d23 Jens Wiklander 2015-04-14 617 if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
4fb0a5eb364d23 Jens Wiklander 2015-04-14 @618 return ERR_PTR(-EINVAL);
4fb0a5eb364d23 Jens Wiklander 2015-04-14 619
f58e236c9d665a Volodymyr Babchuk 2017-11-29 620 pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm, sec_caps);
4fb0a5eb364d23 Jens Wiklander 2015-04-14 621 if (IS_ERR(pool))
595311cf7e49c7 Ard Biesheuvel 2019-12-09 622 return PTR_ERR(pool);
4fb0a5eb364d23 Jens Wiklander 2015-04-14 623
4fb0a5eb364d23 Jens Wiklander 2015-04-14 624 optee = kzalloc(sizeof(*optee), GFP_KERNEL);
4fb0a5eb364d23 Jens Wiklander 2015-04-14 625 if (!optee) {
4fb0a5eb364d23 Jens Wiklander 2015-04-14 626 rc = -ENOMEM;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 627 goto err;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 628 }
4fb0a5eb364d23 Jens Wiklander 2015-04-14 629
4fb0a5eb364d23 Jens Wiklander 2015-04-14 630 optee->invoke_fn = invoke_fn;
d885cc5e0759fc Volodymyr Babchuk 2017-11-29 631 optee->sec_caps = sec_caps;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 632
4fb0a5eb364d23 Jens Wiklander 2015-04-14 633 teedev = tee_device_alloc(&optee_desc, NULL, pool, optee);
4fb0a5eb364d23 Jens Wiklander 2015-04-14 634 if (IS_ERR(teedev)) {
4fb0a5eb364d23 Jens Wiklander 2015-04-14 635 rc = PTR_ERR(teedev);
4fb0a5eb364d23 Jens Wiklander 2015-04-14 636 goto err;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 637 }
4fb0a5eb364d23 Jens Wiklander 2015-04-14 638 optee->teedev = teedev;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 639
4fb0a5eb364d23 Jens Wiklander 2015-04-14 640 teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
4fb0a5eb364d23 Jens Wiklander 2015-04-14 641 if (IS_ERR(teedev)) {
4fb0a5eb364d23 Jens Wiklander 2015-04-14 642 rc = PTR_ERR(teedev);
4fb0a5eb364d23 Jens Wiklander 2015-04-14 643 goto err;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 644 }
4fb0a5eb364d23 Jens Wiklander 2015-04-14 645 optee->supp_teedev = teedev;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 646
4fb0a5eb364d23 Jens Wiklander 2015-04-14 647 rc = tee_device_register(optee->teedev);
4fb0a5eb364d23 Jens Wiklander 2015-04-14 648 if (rc)
4fb0a5eb364d23 Jens Wiklander 2015-04-14 649 goto err;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 650
4fb0a5eb364d23 Jens Wiklander 2015-04-14 651 rc = tee_device_register(optee->supp_teedev);
4fb0a5eb364d23 Jens Wiklander 2015-04-14 652 if (rc)
4fb0a5eb364d23 Jens Wiklander 2015-04-14 653 goto err;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 654
4fb0a5eb364d23 Jens Wiklander 2015-04-14 655 mutex_init(&optee->call_queue.mutex);
4fb0a5eb364d23 Jens Wiklander 2015-04-14 656 INIT_LIST_HEAD(&optee->call_queue.waiters);
4fb0a5eb364d23 Jens Wiklander 2015-04-14 657 optee_wait_queue_init(&optee->wait_queue);
4fb0a5eb364d23 Jens Wiklander 2015-04-14 658 optee_supp_init(&optee->supp);
4fb0a5eb364d23 Jens Wiklander 2015-04-14 659 optee->memremaped_shm = memremaped_shm;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 660 optee->pool = pool;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 661
4fb0a5eb364d23 Jens Wiklander 2015-04-14 662 optee_enable_shm_cache(optee);
4fb0a5eb364d23 Jens Wiklander 2015-04-14 663
595311cf7e49c7 Ard Biesheuvel 2019-12-09 664 platform_set_drvdata(pdev, optee);
595311cf7e49c7 Ard Biesheuvel 2019-12-09 665
4fb0a5eb364d23 Jens Wiklander 2015-04-14 666 pr_info("initialized driver\n");
595311cf7e49c7 Ard Biesheuvel 2019-12-09 667 return 0;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 668 err:
4fb0a5eb364d23 Jens Wiklander 2015-04-14 669 if (optee) {
4fb0a5eb364d23 Jens Wiklander 2015-04-14 670 /*
4fb0a5eb364d23 Jens Wiklander 2015-04-14 671 * tee_device_unregister() is safe to call even if the
4fb0a5eb364d23 Jens Wiklander 2015-04-14 672 * devices hasn't been registered with
4fb0a5eb364d23 Jens Wiklander 2015-04-14 673 * tee_device_register() yet.
4fb0a5eb364d23 Jens Wiklander 2015-04-14 674 */
4fb0a5eb364d23 Jens Wiklander 2015-04-14 675 tee_device_unregister(optee->supp_teedev);
4fb0a5eb364d23 Jens Wiklander 2015-04-14 676 tee_device_unregister(optee->teedev);
4fb0a5eb364d23 Jens Wiklander 2015-04-14 677 kfree(optee);
4fb0a5eb364d23 Jens Wiklander 2015-04-14 678 }
4fb0a5eb364d23 Jens Wiklander 2015-04-14 679 if (pool)
4fb0a5eb364d23 Jens Wiklander 2015-04-14 680 tee_shm_pool_free(pool);
4fb0a5eb364d23 Jens Wiklander 2015-04-14 681 if (memremaped_shm)
4fb0a5eb364d23 Jens Wiklander 2015-04-14 682 memunmap(memremaped_shm);
595311cf7e49c7 Ard Biesheuvel 2019-12-09 683 return rc;
4fb0a5eb364d23 Jens Wiklander 2015-04-14 684 }
4fb0a5eb364d23 Jens Wiklander 2015-04-14 685
:::::: The code at line 618 was first introduced by commit
:::::: 4fb0a5eb364d239722e745c02aef0dbd4e0f1ad2 tee: add OP-TEE driver
:::::: TO: Jens Wiklander <jens.wiklander(a)linaro.org>
:::::: CC: Jens Wiklander <jens.wiklander(a)linaro.org>
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
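The warning points at a leftover ERR_PTR()-style return inside optee_probe(), which now returns int after the platform-driver conversion. A minimal sketch of the likely fix (an assumption, not a merged patch):

	if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
		return -EINVAL;	/* was: return ERR_PTR(-EINVAL); */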
[PATCH OLK-6.6] irqdomain: Fix driver re-inserting failures when IRQs not being freed
by Jie Zhan 09 Apr '24
driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I9ERMB
------------------------------------------------------------------------
Since commit 4615fbc3788d ("genirq/irqdomain: Don't try to free an
interrupt that has no mapping"), we have found failures when
re-inserting some specific drivers:
[root@localhost ~]# rmmod hisi_sas_v3_hw
[root@localhost ~]# modprobe hisi_sas_v3_hw
[ 1295.622525] hisi_sas_v3_hw: probe of 0000:30:04.0 failed with error -2
A relevant discussion can be found at:
https://lore.kernel.org/lkml/3d3d0155e66429968cb4f6b4feeae4b3@kernel.org/
This is because IRQs from a low-level domain are not freed together,
leaving some leaked. Thus, the next driver insertion fails to allocate
the same number of IRQs.
Free a contiguous group of IRQs in one go to fix this issue.
Fixes: 4615fbc3788d ("genirq/irqdomain: Don't try to free an interrupt that has no mapping")
Signed-off-by: Jie Zhan <zhanjie9(a)hisilicon.com>
Reviewed-by: Liao Chang <liaochang1(a)huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai(a)huawei.com>
---
kernel/irq/irqdomain.c | 15 +++++++++++++--
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 0bdef4fe925b..2ec5bd39635c 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1443,13 +1443,24 @@ static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
unsigned int nr_irqs)
{
unsigned int i;
+ int n;
if (!domain->ops->free)
return;
for (i = 0; i < nr_irqs; i++) {
- if (irq_domain_get_irq_data(domain, irq_base + i))
- domain->ops->free(domain, irq_base + i, 1);
+ /* Find the largest possible span of IRQs to free in one go */
+ for (n = 0;
+ ((i + n) < nr_irqs) &&
+ (irq_domain_get_irq_data(domain, irq_base + i + n));
+ n++)
+ ;
+
+ if (!n)
+ continue;
+
+ domain->ops->free(domain, irq_base + i, n);
+ i += n;
}
}
--
2.30.0
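To illustrate the span-grouping logic in the hunk above, here is a small self-contained userspace C sketch (a simulation, not kernel code; has_data() and free_span() are stand-ins) that frees each contiguous run of mapped IRQs with a single call and skips the holes:

#include <stdbool.h>
#include <stdio.h>

/* stand-in for irq_domain_get_irq_data() */
static bool has_data(const bool *map, unsigned int idx)
{
	return map[idx];
}

/* stand-in for domain->ops->free(domain, irq_base + i, n) */
static void free_span(unsigned int base, unsigned int n)
{
	printf("free IRQs [%u..%u] in one call\n", base, base + n - 1);
}

int main(void)
{
	/* index 2 has no mapping, e.g. already freed by a lower domain */
	bool map[5] = { true, true, false, true, true };
	unsigned int nr_irqs = 5, i;
	int n;

	for (i = 0; i < nr_irqs; i++) {
		/* find the largest contiguous span starting at i */
		for (n = 0; (i + n) < nr_irqs && has_data(map, i + n); n++)
			;
		if (!n)
			continue;
		free_span(i, n);	/* prints [0..1], then [3..4] */
		i += n;	/* the loop's i++ then steps past the hole */
	}
	return 0;
}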
From: Jingxian He <hejingxian(a)huawei.com>
Add cvm feature patches:
1. add cvm host feature
2. enable pmu phys irq inject for cvm
3. add bounce buffer feature for cvm guest
4. add lpi support for cvm guest
arch/arm64/configs/defconfig | 2 +
arch/arm64/configs/openeuler_defconfig | 2 +
arch/arm64/include/asm/cvm_guest.h | 21 +
arch/arm64/include/asm/kvm_emulate.h | 14 +
arch/arm64/include/asm/kvm_host.h | 12 +
arch/arm64/include/asm/kvm_tmi.h | 376 +++++++++++
arch/arm64/include/asm/kvm_tmm.h | 72 +++
arch/arm64/kvm/Kconfig | 16 +
arch/arm64/kvm/Makefile | 5 +
arch/arm64/kvm/arch_timer.c | 104 +++-
arch/arm64/kvm/arm.c | 155 ++++-
arch/arm64/kvm/cvm.c | 824 +++++++++++++++++++++++++
arch/arm64/kvm/cvm_exit.c | 229 +++++++
arch/arm64/kvm/cvm_guest.c | 90 +++
arch/arm64/kvm/guest.c | 8 +
arch/arm64/kvm/hyp/vgic-v3-sr.c | 19 +
arch/arm64/kvm/mmio.c | 17 +-
arch/arm64/kvm/mmu.c | 7 +
arch/arm64/kvm/pmu-emul.c | 9 +
arch/arm64/kvm/psci.c | 12 +-
arch/arm64/kvm/reset.c | 10 +
arch/arm64/kvm/tmi.c | 148 +++++
arch/arm64/kvm/vgic/vgic-v3.c | 16 +-
arch/arm64/kvm/vgic/vgic.c | 52 +-
arch/arm64/mm/mmu.c | 11 +
arch/arm64/mm/pageattr.c | 9 +-
drivers/irqchip/irq-gic-v3-its.c | 228 ++++++-
drivers/perf/arm_pmu.c | 17 +
include/kvm/arm_arch_timer.h | 4 +
include/linux/kvm_host.h | 21 +
include/linux/perf/arm_pmu.h | 3 +
include/linux/swiotlb.h | 13 +
include/uapi/linux/kvm.h | 29 +
kernel/dma/direct.c | 39 ++
kernel/dma/swiotlb.c | 86 ++-
virt/kvm/kvm_main.c | 7 +-
36 files changed, 2646 insertions(+), 41 deletions(-)
create mode 100644 arch/arm64/include/asm/cvm_guest.h
create mode 100644 arch/arm64/include/asm/kvm_tmi.h
create mode 100644 arch/arm64/include/asm/kvm_tmm.h
create mode 100644 arch/arm64/kvm/cvm.c
create mode 100644 arch/arm64/kvm/cvm_exit.c
create mode 100644 arch/arm64/kvm/cvm_guest.c
create mode 100644 arch/arm64/kvm/tmi.c
--
2.33.0
From: caijian <caijian11(a)h-partners.com>
The patches add support for NMI in the virtual machine.
https://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms.git/log/?…
Marc Zyngier (15):
KVM: arm64: vgic-v3: Upgrade AP1Rn to 64bit.
KVM: arm64: vgic-v3: Allow the NMI state to make it into the LRs
KVM: arm64: vgic-v3: Make NMI priority RES0
KVM: arm64: vgic-v4: Propagate the NMI state into the GICv4.1 VSGI
configuration
KVM: arm64: vgic-v3: Use the NMI attribute as part of the AP-list
sorting
KVM: arm64: vgic-v3: Add support for GIC{D,R}_INMIR registers
KVM: arm64: vgic-v3: Add userspace selection for GICv3.3 NMI
KVM: arm64: vgic-debug: Add the NMI field to the debug output
KVM: arm64: Allow userspace to control ID_AA64PFR1_EL1.NMI
KVM: arm64: Don't trap ALLINT accesses if the vcpu has FEAT_NMI
KVM: arm64: vgic-v3: Don't inject an NMI if the vcpu doesn't have
FEAT_NMI
KVM: arm64: Allow GICv3.3 NMI if the host supports it
KVM: arm64: Handle traps of ALLINT
arm64: Decouple KVM from CONFIG_ARM64_NMI
KVM: arm64: vgic-v3: Handle traps of ICV_NMIAR1_EL1
arch/arm64/include/asm/cpufeature.h | 3 +-
arch/arm64/include/asm/kvm_host.h | 2 +
arch/arm64/include/asm/sysreg.h | 3 ++
arch/arm64/kernel/cpufeature.c | 13 +++--
arch/arm64/kvm/arm.c | 3 ++
arch/arm64/kvm/hyp/include/hyp/switch.h | 6 ++-
arch/arm64/kvm/hyp/vgic-v3-sr.c | 14 +++--
arch/arm64/kvm/sys_regs.c | 33 +++++++++++-
arch/arm64/kvm/vgic/vgic-debug.c | 9 ++--
arch/arm64/kvm/vgic/vgic-init.c | 10 +++-
arch/arm64/kvm/vgic/vgic-mmio-v2.c | 1 +
arch/arm64/kvm/vgic/vgic-mmio-v3.c | 70 ++++++++++++++++++++++++-
arch/arm64/kvm/vgic/vgic-mmio.c | 25 ++++++---
arch/arm64/kvm/vgic/vgic-v3.c | 12 ++++-
arch/arm64/kvm/vgic/vgic-v4.c | 1 +
arch/arm64/kvm/vgic/vgic.c | 8 ++-
arch/arm64/kvm/vgic/vgic.h | 1 +
drivers/irqchip/irq-gic-v3-its.c | 9 ++++
drivers/irqchip/irq-gic-v3.c | 12 +++++
drivers/irqchip/irq-gic-v4.c | 3 +-
include/kvm/arm_vgic.h | 10 +++-
include/linux/irqchip/arm-gic-v4.h | 4 +-
include/linux/irqchip/arm-vgic-info.h | 2 +
23 files changed, 224 insertions(+), 30 deletions(-)
--
2.30.0
This patchset includes 2 minor changes
- For interrupt coalescing, the count of CQ entries is set to 10, and the
interrupt coalescing timeout period is set to 10us.
- Add cond_resched() to cq_thread_v3_hw() to execute the watchdog thread.
Changes since v1:
- Remove unnecessary comments.
- Update the commit message for patch 1.
Yihang Li (2):
scsi: hisi_sas: Default enable interrupt coalescing
scsi: hisi_sas: Add cond_resched() to cq_thread_v3_hw()
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 13 ++++++-------
1 file changed, 6 insertions(+), 7 deletions(-)
--
2.33.0
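As a rough sketch of the second change (the loop shape and helper names here are assumptions, not the driver's actual code), dropping a cond_resched() into the completion-queue drain loop gives the scheduler a chance to run other work, such as the watchdog thread:

	/* threaded IRQ handler draining a completion queue */
	while (rd_point != wr_point) {
		handle_completion(cq, rd_point);	/* hypothetical helper */
		rd_point = (rd_point + 1) % queue_slot_nr;
		cond_resched();	/* yield so the watchdog thread can run */
	}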
[openeuler:openEuler-1.0-LTS 18302/22054] mm/share_pool.c:837:29: sparse: sparse: incompatible types for operation (<=):
by kernel test robot 09 Apr '24
tree: https://gitee.com/openeuler/kernel.git openEuler-1.0-LTS
head: 1ed85cbd67db3ccd721e15f7459154e8daec22a8
commit: 0f0c3021514f52310056c32406b4b760fc9a7e6e [18302/22054] share_pool: Apply sp_group_id_by_pid() to multi-group-mode
config: arm64-randconfig-r111-20240331 (https://download.01.org/0day-ci/archive/20240409/202404090717.XitFSfCo-lkp@…)
compiler: aarch64-linux-gcc (GCC) 13.2.0
reproduce: (https://download.01.org/0day-ci/archive/20240409/202404090717.XitFSfCo-lkp@…)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp(a)intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202404090717.XitFSfCo-lkp@intel.com/
sparse warnings: (new ones prefixed by >>)
>> mm/share_pool.c:837:29: sparse: sparse: incompatible types for operation (<=):
mm/share_pool.c:837:29: sparse: int *num
mm/share_pool.c:837:29: sparse: int
mm/share_pool.c:1628:9: sparse: sparse: undefined identifier 'sysctl_compaction_handler'
mm/share_pool.c: In function 'sp_group_id_by_pid':
mm/share_pool.c:837:29: warning: ordered comparison of pointer with integer zero [-Wextra]
837 | if (!spg_ids || num <= 0)
| ^~
mm/share_pool.c: In function 'sp_compact_nodes':
mm/share_pool.c:1628:9: error: implicit declaration of function 'sysctl_compaction_handler' [-Werror=implicit-function-declaration]
1628 | sysctl_compaction_handler(NULL, 1, NULL, NULL, NULL);
| ^~~~~~~~~~~~~~~~~~~~~~~~~
In function 'vmalloc_area_clr_flag',
inlined from 'sp_make_share_k2u' at mm/share_pool.c:2550:8:
mm/share_pool.c:2396:18: warning: 'spa' may be used uninitialized [-Wmaybe-uninitialized]
2396 | spa->kva = 0;
| ~~~~~~~~~^~~
mm/share_pool.c: In function 'sp_make_share_k2u':
mm/share_pool.c:2429:25: note: 'spa' was declared here
2429 | struct sp_area *spa;
| ^~~
mm/share_pool.c:2562:16: warning: 'uva' may be used uninitialized [-Wmaybe-uninitialized]
2562 | return uva;
| ^~~
mm/share_pool.c:2427:15: note: 'uva' was declared here
2427 | void *uva;
| ^~~
mm/share_pool.c: In function 'sp_group_post_exit':
mm/share_pool.c:3863:37: warning: 'alloc_size' may be used uninitialized [-Wmaybe-uninitialized]
3863 | if (alloc_size != 0 || k2u_size != 0)
| ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~
mm/share_pool.c:3831:14: note: 'alloc_size' was declared here
3831 | long alloc_size, k2u_size;
| ^~~~~~~~~~
mm/share_pool.c:3863:37: warning: 'k2u_size' may be used uninitialized [-Wmaybe-uninitialized]
3863 | if (alloc_size != 0 || k2u_size != 0)
| ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~
mm/share_pool.c:3831:26: note: 'k2u_size' was declared here
3831 | long alloc_size, k2u_size;
| ^~~~~~~~
cc1: some warnings being treated as errors
vim +837 mm/share_pool.c
815
816 /**
817 * sp_group_id_by_pid() - Get the sp_group ID array of a process.
818 * @pid: pid of target process.
819 * @spg_ids point to an array to save the group ids the process belongs to
820 * @num input the spg_ids array size; output the spg number of the process
821 *
822 * Return:
823 * >0 - the sp_group ID.
824 * -ENODEV - target process doesn't belong to any sp_group.
825 * -EINVAL - spg_ids or num is NULL.
826 * -E2BIG - the num of groups process belongs to is larger than *num
827 */
828 int sp_group_id_by_pid(int pid, int *spg_ids, int *num)
829 {
830 int ret = 0;
831 struct sp_group_node *node;
832 struct sp_group_master *master = NULL;
833 struct task_struct *tsk;
834
835 check_interrupt_context();
836
> 837 if (!spg_ids || num <= 0)
838 return -EINVAL;
839
840 ret = get_task(pid, &tsk);
841 if (ret)
842 return ret;
843
844 down_read(&sp_group_sem);
845 task_lock(tsk);
846 if (tsk->mm)
847 master = tsk->mm->sp_group_master;
848 task_unlock(tsk);
849
850 if (!master) {
851 ret = -ENODEV;
852 goto out_up_read;
853 }
854
855 if (!master->count) {
856 ret = -ENODEV;
857 goto out_up_read;
858 }
859 if ((unsigned int)*num < master->count) {
860 ret = -E2BIG;
861 goto out_up_read;
862 }
863 *num = master->count;
864
865 list_for_each_entry(node, &master->node_list, group_node)
866 *(spg_ids++) = node->spg->id;
867
868 out_up_read:
869 up_read(&sp_group_sem);
870 put_task_struct(tsk);
871 return ret;
872 }
873 EXPORT_SYMBOL_GPL(sp_group_id_by_pid);
874
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
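The flagged comparison at line 837 tests the pointer num itself against zero; since @num is documented as carrying the array size on input, the check presumably needs a dereference. A minimal sketch of that assumed fix:

	if (!spg_ids || !num || *num <= 0)
		return -EINVAL;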
[openeuler:OLK-6.6 6913/7311] drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:6003:37: sparse: sparse: restricted __le32 degrades to integer
by kernel test robot 09 Apr '24
tree: https://gitee.com/openeuler/kernel.git OLK-6.6
head: 05607873db411ec3c614313b43cec60138c26a99
commit: e992b88fa60f2e405a35c281c549b4caf3cd78f3 [6913/7311] net: hns3: support set/get VxLAN rule of rx flow director by ethtool
config: loongarch-randconfig-r113-20240408 (https://download.01.org/0day-ci/archive/20240409/202404090532.QLIDcYg8-lkp@…)
compiler: loongarch64-linux-gcc (GCC) 13.2.0
reproduce: (https://download.01.org/0day-ci/archive/20240409/202404090532.QLIDcYg8-lkp@…)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp(a)intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202404090532.QLIDcYg8-lkp@intel.com/
sparse warnings: (new ones prefixed by >>)
>> drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:6003:37: sparse: sparse: restricted __le32 degrades to integer
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:6004:37: sparse: sparse: restricted __le32 degrades to integer
>> drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:6238:17: sparse: sparse: restricted __be32 degrades to integer
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:6332:17: sparse: sparse: restricted __be32 degrades to integer
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:13360:21: sparse: sparse: symbol 'hclge_ops' was not declared. Should it be static?
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c: note: in included file (through include/linux/mmzone.h, include/linux/gfp.h, include/linux/slab.h, ...):
include/linux/page-flags.h:245:46: sparse: sparse: self-comparison always evaluates to false
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:5574:31: sparse: sparse: context imbalance in 'hclge_sync_fd_user_def_cfg' - unexpected unlock
vim +6003 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
5945
5946 int offset, moffset, ip_offset;
5947 enum HCLGE_FD_KEY_OPT key_opt;
5948 u16 tmp_x_s, tmp_y_s;
5949 u32 tmp_x_l, tmp_y_l;
5950 u8 *p = (u8 *)rule;
5951 int i;
5952
5953 if (rule->unused_tuple & BIT(tuple_bit))
5954 return true;
5955
5956 key_opt = tuple_key_info[tuple_bit].key_opt;
5957 offset = tuple_key_info[tuple_bit].offset;
5958 moffset = tuple_key_info[tuple_bit].moffset;
5959
5960 switch (key_opt) {
5961 case KEY_OPT_U8:
5962 calc_x(*key_x, p[offset], p[moffset]);
5963 calc_y(*key_y, p[offset], p[moffset]);
5964
5965 return true;
5966 case KEY_OPT_LE16:
5967 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5968 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5969 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5970 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5971
5972 return true;
5973 case KEY_OPT_LE32:
5974 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5975 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5976 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5977 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5978
5979 return true;
5980 case KEY_OPT_MAC:
5981 for (i = 0; i < ETH_ALEN; i++) {
5982 calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5983 p[moffset + i]);
5984 calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5985 p[moffset + i]);
5986 }
5987
5988 return true;
5989 case KEY_OPT_IP:
5990 ip_offset = IPV4_INDEX * sizeof(u32);
5991 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5992 *(u32 *)(&p[moffset + ip_offset]));
5993 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5994 *(u32 *)(&p[moffset + ip_offset]));
5995 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5996 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5997
5998 return true;
5999 case KEY_OPT_VNI:
6000 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
6001 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
6002 for (i = 0; i < HCLGE_VNI_LENGTH; i++) {
> 6003 key_x[i] = (cpu_to_le32(tmp_x_l) >> (i * BITS_PER_BYTE)) & 0xFF;
6004 key_y[i] = (cpu_to_le32(tmp_y_l) >> (i * BITS_PER_BYTE)) & 0xFF;
6005 }
6006 return true;
6007 default:
6008 return false;
6009 }
6010 }
6011
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
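The warning comes from shifting a cpu_to_le32() result, i.e. doing arithmetic on a restricted __le32. If the intent is to emit the bytes of the little-endian encoding LSB-first, shifting the native-order value directly is already portable and avoids the __le32 arithmetic; a sketch of that assumed rewrite, not the merged fix:

	for (i = 0; i < HCLGE_VNI_LENGTH; i++) {
		key_x[i] = (tmp_x_l >> (i * BITS_PER_BYTE)) & 0xFF;
		key_y[i] = (tmp_y_l >> (i * BITS_PER_BYTE)) & 0xFF;
	}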
[openeuler:OLK-6.6] BUILD REGRESSION 05607873db411ec3c614313b43cec60138c26a99
by kernel test robot 09 Apr '24
tree/branch: https://gitee.com/openeuler/kernel.git OLK-6.6
branch HEAD: 05607873db411ec3c614313b43cec60138c26a99 !5768 resctrl: fix undefined reference to lockdep_is_cpus_held()
Error/Warning reports:
https://lore.kernel.org/oe-kbuild-all/202404082139.0v0lXo9X-lkp@intel.com
https://lore.kernel.org/oe-kbuild-all/202404090038.WzHqywzr-lkp@intel.com
https://lore.kernel.org/oe-kbuild-all/202404090328.GBESI86e-lkp@intel.com
Error/Warning: (recently discovered and may have been fixed)
drivers/net/ethernet/huawei/hinic/hinic_dcb.c:103:19: error: invalid application of 'sizeof' to an incomplete type 'struct ieee_ets'
drivers/net/ethernet/huawei/hinic/hinic_dcb.c:105:5: error: incomplete definition of type 'struct ieee_ets'
drivers/net/ethernet/huawei/hinic/hinic_dcb.c:117:19: error: invalid application of 'sizeof' to an incomplete type 'struct ieee_pfc'
drivers/net/ethernet/huawei/hinic/hinic_dcb.c:118:5: error: incomplete definition of type 'struct ieee_pfc'
drivers/net/ethernet/huawei/hinic/hinic_dcb.c:244:22: error: use of undeclared identifier 'DCB_CAP_DCBX_HOST'
drivers/net/ethernet/huawei/hinic/hinic_dcb.c:244:42: error: use of undeclared identifier 'DCB_CAP_DCBX_VER_CEE'
drivers/net/ethernet/huawei/hinic/hinic_dcb.c:475:14: error: use of undeclared identifier 'DCB_ATTR_VALUE_UNDEFINED'
drivers/net/ethernet/huawei/hinic/hinic_lld.c:2138:10: error: use of undeclared identifier 'disable_vf_load'
drivers/net/ethernet/huawei/hinic/hinic_main.c:2291:21: error: no member named 'dcbnl_ops' in 'struct net_device'
drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h:210:19: error: field has incomplete type 'struct ieee_ets'
drivers/net/ethernet/huawei/hinic/hinic_nic_dev.h:212:19: error: field has incomplete type 'struct ieee_pfc'
Error/Warning ids grouped by kconfigs:
gcc_recent_errors
|-- loongarch-allmodconfig
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-poll_free_mdio
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnp500_get_link_ksettings
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnp500_get_pauseparam
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnp500_set_link_ksettings
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnp500_set_pauseparam
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_addr_list_itr
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_check_mac_link_hw_ops_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_clean_link_hw_ops_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_driver_status_hw_ops_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_eth_clear_rar_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_eth_clear_vmdq_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_eth_clr_all_layer2_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_eth_clr_all_tuple5_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_eth_clr_layer2_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_eth_clr_mc_addr_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_eth_clr_tuple5_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_eth_clr_vfta_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_eth_set_layer2_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_eth_set_rar_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_eth_set_rss_hfunc_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_eth_set_rx_skip_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_eth_set_tcp_sync_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_eth_set_tuple5_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_eth_set_vfta_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_eth_set_vmdq_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_eth_update_mc_addr_list_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_eth_update_rss_key_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_eth_update_rss_table_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_get_lldp_hw_ops_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_get_lpi_status_hw_ops_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_get_ncsi_mac_hw_ops_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_get_ncsi_vlan_hw_ops_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_get_permtion_mac_addr_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_init_hw_ops_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_layer2_pritologic_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_mac_check_link_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_mac_fc_mode_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_mac_mdio_read_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_mac_mdio_write_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_mac_pmt_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_mac_set_eee_timer_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_mac_set_mac_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_mdio_read
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_phy_read_reg_hw_ops_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_phy_write_reg_hw_ops_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_reset_eee_mode_hw_ops_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_reset_hw_ops_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_set_eee_mode_hw_ops_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_set_eee_pls_hw_ops_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_set_eee_timer_hw_ops_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_set_ethtool_hw_ops_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_set_lldp_hw_ops_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_set_vf_vlan_mode_hw_ops_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_setup_eee_hw_ops_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_setup_mac_link_hw_ops_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_setup_wol_hw_ops_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_start_hw_ops_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_chip.c:warning:no-previous-prototype-for-rnpgbe_tuple5_pritologic_n500
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_common.h:warning:suggest-braces-around-empty-body-in-an-if-statement
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_lib.c:warning:no-previous-prototype-for-rnpgbe_acquire_msix_vectors
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_lib.c:warning:no-previous-prototype-for-update_ring_count
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_main.c:warning:directive-output-may-be-truncated-writing-byte-into-a-region-of-size-between-and
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_main.c:warning:no-previous-prototype-for-print_status
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_main.c:warning:no-previous-prototype-for-rnpgbe_assign_netdev_ops
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_main.c:warning:no-previous-prototype-for-rnpgbe_disable_eee_mode
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_main.c:warning:no-previous-prototype-for-rnpgbe_eee_init
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_main.c:warning:no-previous-prototype-for-rnpgbe_phy_init_eee
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_main.c:warning:no-previous-prototype-for-rnpgbe_reinit_locked
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_main.c:warning:no-previous-prototype-for-rnpgbe_rx_ring_reinit
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_main.c:warning:no-previous-prototype-for-rnpgbe_vlan_stags_flag
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_main.c:warning:no-previous-prototype-for-rnpgbe_write_eitr_rx
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_main.c:warning:no-previous-prototype-for-rnpgbe_xmit_nop_frame_ring
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_main.c:warning:suggest-braces-around-empty-body-in-an-if-statement
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_mbx_fw.c:warning:build_writereg_req-accessing-bytes-in-a-region-of-size
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_mbx_fw.c:warning:no-previous-prototype-for-mbx_cookie_zalloc
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_mbx_fw.c:warning:no-previous-prototype-for-rnpgbe_fw_reg_read
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_mbx_fw.c:warning:no-previous-prototype-for-rnpgbe_fw_send_cmd_wait
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_mbx_fw.c:warning:no-previous-prototype-for-rnpgbe_link_stat_mark_disable
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_mbx_fw.c:warning:no-previous-prototype-for-rnpgbe_link_stat_mark_reset
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_mbx_fw.c:warning:no-previous-prototype-for-rnpgbe_mbx_fw_post_req
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_mbx_fw.c:warning:no-previous-prototype-for-rnpgbe_mbx_get_link
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_mbx_fw.c:warning:no-previous-prototype-for-rnpgbe_mbx_write_posted_locked
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_mbx_fw.c:warning:no-previous-prototype-for-to_mac_type
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_ptp.c:warning:no-previous-prototype-for-rnpgbe_ptp_setup_ptp
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_sriov.c:warning:no-previous-prototype-for-check_ari_mode
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_sriov.c:warning:no-previous-prototype-for-rnpgbe_msg_post_status_signle_link
| |-- drivers-net-ethernet-mucse-rnpgbe-rnpgbe_sysfs.c:warning:no-previous-prototype-for-n500_exchange_share_ram
| |-- drivers-net-ethernet-mucse-rnpgbevf-rnpgbevf_ethtool.c:warning:no-previous-prototype-for-rnpgbevf_get_ringparam
| |-- drivers-net-ethernet-mucse-rnpgbevf-rnpgbevf_ethtool.c:warning:no-previous-prototype-for-rnpgbevf_set_ringparam
| |-- drivers-net-ethernet-mucse-rnpgbevf-rnpgbevf_ethtool.c:warning:rnp_gstrings_test-defined-but-not-used
| |-- drivers-net-ethernet-mucse-rnpgbevf-rnpgbevf_main.c:warning:d-directive-output-may-be-truncated-writing-between-and-bytes-into-a-region-of-size-between-and
| |-- drivers-net-ethernet-mucse-rnpgbevf-rnpgbevf_main.c:warning:no-previous-prototype-for-rnpgbevf_alloc_rx_buffers
| |-- drivers-net-ethernet-mucse-rnpgbevf-rnpgbevf_main.c:warning:no-previous-prototype-for-rnpgbevf_assign_netdev_ops
| |-- drivers-net-ethernet-mucse-rnpgbevf-rnpgbevf_main.c:warning:no-previous-prototype-for-rnpgbevf_configure_rx_ring
| |-- drivers-net-ethernet-mucse-rnpgbevf-rnpgbevf_main.c:warning:no-previous-prototype-for-rnpgbevf_configure_tx_ring
| |-- drivers-net-ethernet-mucse-rnpgbevf-rnpgbevf_main.c:warning:no-previous-prototype-for-rnpgbevf_disable_rx_queue
| |-- drivers-net-ethernet-mucse-rnpgbevf-rnpgbevf_main.c:warning:no-previous-prototype-for-rnpgbevf_enable_rx_queue
| |-- drivers-net-ethernet-mucse-rnpgbevf-rnpgbevf_main.c:warning:no-previous-prototype-for-rnpgbevf_maybe_tx_ctxtdesc
| |-- drivers-net-ethernet-mucse-rnpgbevf-rnpgbevf_main.c:warning:no-previous-prototype-for-rnpgbevf_tx_ctxtdesc
| |-- drivers-net-ethernet-mucse-rnpgbevf-rnpgbevf_main.c:warning:no-previous-prototype-for-rnpgbevf_unmap_and_free_tx_resource
| |-- drivers-net-ethernet-mucse-rnpgbevf-rnpgbevf_main.c:warning:no-previous-prototype-for-rnpgbevf_write_eitr_rx
| |-- drivers-net-ethernet-mucse-rnpgbevf-rnpgbevf_main.c:warning:no-previous-prototype-for-rnpgbevf_xmit_frame_ring
| |-- drivers-net-ethernet-mucse-rnpgbevf-vf.c:warning:no-previous-prototype-for-rnpgbevf_addr_list_itr
| |-- drivers-net-ethernet-mucse-rnpgbevf-vf.c:warning:no-previous-prototype-for-rnpgbevf_get_queues
| |-- drivers-net-ethernet-mucse-rnpgbevf-vf.c:warning:no-previous-prototype-for-rnpgbevf_negotiate_api_version
| |-- drivers-net-ethernet-mucse-rnpgbevf-vf.c:warning:no-previous-prototype-for-rnpgbevf_set_veb_mac_n500
| |-- drivers-net-ethernet-mucse-rnpgbevf-vf.c:warning:no-previous-prototype-for-rnpgbevf_set_vlan_n500
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_common.c:warning:expecting-prototype-for-rnpm_enable_rx_buff().-Prototype-was-for-rnpm_enable_rx_buff_generic()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_common.c:warning:expecting-prototype-for-rnpm_update_mc_addr_list_generic().-Prototype-was-for-rnpm_update_mutiport_mc_addr_list_generic()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_debugfs.c:warning:expecting-prototype-for-rnpm_dbg_reg_ops_write().-Prototype-was-for-rnpm_dbg_phy_ops_write()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_ethtool.c:warning:no-previous-prototype-for-rnpm_get_phy_statistics
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_lib.c:warning:no-previous-prototype-for-rnpm_setup_layer2_remapping
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_lib.c:warning:no-previous-prototype-for-rnpm_setup_tuple5_remapping
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_lib.c:warning:no-previous-prototype-for-rnpm_setup_tuple5_remapping_tcam
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:directive-output-may-be-truncated-writing-byte-into-a-region-of-size-between-and
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-ixgbe_write_eitr().-Prototype-was-for-rnpm_write_eitr()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnp_irq_affinity_notify().-Prototype-was-for-rnpm_irq_affinity_notify()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnp_irq_affinity_release().-Prototype-was-for-rnpm_irq_affinity_release()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnp_is_non_eop().-Prototype-was-for-rnpm_is_non_eop()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnpm_set_ivar().-Prototype-was-for-rnpm_set_ring_vector()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:no-previous-prototype-for-clean_all_port_resetting
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:no-previous-prototype-for-control_mac_rx
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:no-previous-prototype-for-rnpm_assign_netdev_ops
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:no-previous-prototype-for-rnpm_can_rpu_start
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:no-previous-prototype-for-rnpm_check_mc_addr
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:no-previous-prototype-for-rnpm_clear_udp_tunnel_port
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:no-previous-prototype-for-rnpm_fix_queue_number
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:no-previous-prototype-for-rnpm_pf_service_event_schedule
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:no-previous-prototype-for-rnpm_pf_service_task
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:no-previous-prototype-for-rnpm_pf_service_timer
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:no-previous-prototype-for-rnpm_rx_ring_reinit
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:no-previous-prototype-for-rnpm_service_timer
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:no-previous-prototype-for-rnpm_vlan_stags_flag
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:no-previous-prototype-for-rnpm_write_eitr
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:no-previous-prototype-for-rnpm_xmit_nop_frame_ring
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:no-previous-prototype-for-rnpm_xmit_nop_frame_ring_temp
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:no-previous-prototype-for-update_pf_vlan
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:no-previous-prototype-for-wait_all_port_resetting
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_mbx_fw.c:warning:Cannot-understand-speed:
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_mbx_fw.c:warning:build_writereg_req-accessing-bytes-in-a-region-of-size
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_mbx_fw.c:warning:no-previous-prototype-for-mbx_cookie_zalloc
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_mbx_fw.c:warning:no-previous-prototype-for-rnpm_fw_get_capablity
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_mbx_fw.c:warning:no-previous-prototype-for-rnpm_fw_reg_read
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_mbx_fw.c:warning:no-previous-prototype-for-rnpm_fw_send_cmd_wait
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_mbx_fw.c:warning:no-previous-prototype-for-rnpm_get_port_stats2
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_mbx_fw.c:warning:no-previous-prototype-for-rnpm_link_stat_mark_disable
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_mbx_fw.c:warning:no-previous-prototype-for-rnpm_mbx_fw_post_req
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_mbx_fw.c:warning:no-previous-prototype-for-rnpm_mbx_lldp_all_ports_enable
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_mbx_fw.c:warning:no-previous-prototype-for-rnpm_mbx_pluginout_evt_en
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_mbx_fw.c:warning:no-previous-prototype-for-rnpm_mbx_write_posted_locked
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_n10.c:warning:expecting-prototype-for-rnpm_atr_add_signature_filter_n10().-Prototype-was-for-rnpm_fdir_add_signature_filter_n10()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_n10.c:warning:no-previous-prototype-for-rnpm_reset_pipeline_n10
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_ptp.c:warning:no-previous-prototype-for-rnpm_ptp_setup_ptp
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_ptp.c:warning:suggest-braces-around-empty-body-in-an-if-statement
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_sriov.c:warning:no-previous-prototype-for-rnpm_get_vf_ringnum
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_sriov.c:warning:no-previous-prototype-for-rnpm_setup_ring_maxrate
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_sriov.c:warning:variable-y-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_sysfs.c:warning:no-previous-prototype-for-rnpm_mbx_get_pn_sn
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_tc_u32_parse.h:warning:rnpm_ipv4_parser-defined-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:no-previous-prototype-for-rnpvf_get_ringparam
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:rnp_gstrings_test-defined-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:variable-queue_idx-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_check_for_ack_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_check_for_msg_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_check_for_rst_msg_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_obtain_mbx_lock_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_poll_for_ack
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_poll_for_msg
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_read_mbx_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_read_posted_mbx
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_write_mbx_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_write_posted_mbx
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Excess-function-parameter-data-description-in-rnpvf_watchdog
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Excess-function-parameter-rx_ring-description-in-rnpvf_pull_tail
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Excess-function-parameter-skb-description-in-rnpvf_is_non_eop
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Function-parameter-or-member-rnpvf_msix_vector-not-described-in-rnpvf_set_ring_vector
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Function-parameter-or-member-rnpvf_queue-not-described-in-rnpvf_set_ring_vector
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Function-parameter-or-member-t-not-described-in-rnpvf_watchdog
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Function-parameter-or-member-type-not-described-in-rnpvf_update_itr
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:d-directive-output-may-be-truncated-writing-between-and-bytes-into-a-region-of-size-between-and
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:expecting-prototype-for-rnp_clean_rx_irq().-Prototype-was-for-rnpvf_clean_rx_irq()-instead
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:expecting-prototype-for-rnp_clean_rx_ring().-Prototype-was-for-rnpvf_clean_rx_ring()-instead
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:expecting-prototype-for-rnpvf_set_ivar().-Prototype-was-for-rnpvf_set_ring_vector()-instead
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:expecting-prototype-for-rnpvf_write_eitr().-Prototype-was-for-rnpvf_write_eitr_rx()-instead
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-rnpvf_alloc_rx_buffers
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-rnpvf_assign_netdev_ops
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-rnpvf_configure_rx_ring
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-rnpvf_configure_tx_ring
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-rnpvf_disable_rx_queue
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-rnpvf_enable_rx_queue
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-rnpvf_maybe_tx_ctxtdesc
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-rnpvf_tx_ctxtdesc
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-rnpvf_unmap_and_free_tx_resource
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-rnpvf_write_eitr_rx
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-rnpvf_xmit_frame_ring
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-update_rx_count
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-err-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-hw-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-vector_threshold-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:Excess-function-parameter-mac_addr-description-in-rnpvf_get_queues_vf
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-rnpvf_addr_list_itr
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-rnpvf_get_queues
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-rnpvf_negotiate_api_version
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-rnpvf_set_veb_mac_n10
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-rnpvf_set_vlan_n10
| `-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:variable-number_of_queues-set-but-not-used
|-- loongarch-randconfig-001-20240408
| |-- arch-loongarch-kvm-exit.c:error:struct-sched_info-has-no-member-named-run_delay
| `-- arch-loongarch-kvm-vcpu.c:error:struct-sched_info-has-no-member-named-run_delay
`-- loongarch-randconfig-r113-20240408
|-- arch-loongarch-kernel-legacy_boot.c:sparse:sparse:symbol-bpi_init-was-not-declared.-Should-it-be-static
|-- arch-loongarch-kernel-legacy_boot.c:sparse:sparse:symbol-eiointc_default-was-not-declared.-Should-it-be-static
|-- arch-loongarch-kernel-legacy_boot.c:sparse:sparse:symbol-liointc_default-was-not-declared.-Should-it-be-static
|-- arch-loongarch-kernel-legacy_boot.c:sparse:sparse:symbol-liointc_domain-was-not-declared.-Should-it-be-static
|-- arch-loongarch-kernel-legacy_boot.c:sparse:sparse:symbol-nr_io_pics-was-not-declared.-Should-it-be-static
|-- arch-loongarch-kernel-legacy_boot.c:sparse:sparse:symbol-pch_lpc_domain-was-not-declared.-Should-it-be-static
|-- arch-loongarch-kernel-legacy_boot.c:sparse:sparse:symbol-pch_msi_domain-was-not-declared.-Should-it-be-static
|-- arch-loongarch-kernel-legacy_boot.c:sparse:sparse:symbol-pch_pic_domain-was-not-declared.-Should-it-be-static
|-- arch-loongarch-kernel-legacy_boot.c:sparse:sparse:symbol-pchlpc_default-was-not-declared.-Should-it-be-static
|-- arch-loongarch-kernel-legacy_boot.c:sparse:sparse:symbol-pchmsi_default-was-not-declared.-Should-it-be-static
|-- arch-loongarch-kernel-legacy_boot.c:sparse:sparse:symbol-pchpic_default-was-not-declared.-Should-it-be-static
|-- arch-loongarch-kernel-legacy_boot.c:sparse:sparse:symbol-register_default_pic-was-not-declared.-Should-it-be-static
|-- drivers-char-ipmi-ipmi_si_ls2k500.c:sparse:sparse:incorrect-type-in-assignment-(different-address-spaces)-expected-void-addressable-assigned-addr_source_data-got-void-noderef-__iomem
|-- drivers-char-ipmi-ipmi_si_ls2k500.c:sparse:sparse:symbol-ipmi_ls2k500_platform_driver-was-not-declared.-Should-it-be-static
|-- drivers-char-ipmi-ipmi_si_ls2k500.c:sparse:sparse:symbol-ipmi_si_sim_setup-was-not-declared.-Should-it-be-static
|-- drivers-net-ethernet-hisilicon-hns3-hns3_common-hclge_comm_cmd.c:sparse:sparse:incorrect-type-in-argument-(different-base-types)-expected-unsigned-short-usertype-opcode-got-restricted-__le16-usertype-
|-- drivers-net-ethernet-hisilicon-hns3-hns3pf-.-hclge_trace.h:sparse:sparse:cast-to-restricted-__le32
|-- drivers-net-ethernet-hisilicon-hns3-hns3pf-hclge_main.c:sparse:sparse:symbol-hclge_ops-was-not-declared.-Should-it-be-static
`-- drivers-net-ethernet-hisilicon-hns3-hns3vf-hclgevf_mbx.c:sparse:sparse:incorrect-type-in-argument-(different-base-types)-expected-unsigned-short-usertype-qb_state-got-restricted-__le16-usertype
clang_recent_errors
|-- arm64-allmodconfig
| |-- drivers-net-ethernet-huawei-hinic-hinic_api_cmd.c:warning:expecting-prototype-for-prepare_cell().-Prototype-was-for-wait_for_resp_polling()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_cfg.c:warning:arithmetic-between-different-enumeration-types-(-enum-hinic_node_id-and-enum-hinic_fault_err_level-)
| |-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_aeq_register_sw_cb().-Prototype-was-for-hinic_aeq_register_swe_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_aeq_unregister_sw_cb().-Prototype-was-for-hinic_aeq_unregister_swe_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_ceq_register_sw_cb().-Prototype-was-for-hinic_ceq_register_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_hwdev.c:warning:arithmetic-between-different-enumeration-types-(-enum-hinic_node_id-and-enum-hinic_fault_err_level-)
| |-- drivers-net-ethernet-huawei-hinic-hinic_hwif.c:warning:This-comment-starts-with-but-isn-t-a-kernel-doc-comment.-Refer-Documentation-doc-guide-kernel-doc.rst
| |-- drivers-net-ethernet-huawei-hinic-hinic_mbox.c:warning:expecting-prototype-for-hinic_unregister_ppf_mbox_cb().-Prototype-was-for-hinic_unregister_pf_mbox_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_mbox.c:warning:expecting-prototype-for-hinic_unregister_ppf_mbox_cb().-Prototype-was-for-hinic_unregister_ppf_to_pf_mbox_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_mgmt.c:warning:This-comment-starts-with-but-isn-t-a-kernel-doc-comment.-Refer-Documentation-doc-guide-kernel-doc.rst
| |-- drivers-net-ethernet-huawei-hinic-hinic_nic_dbg.c:warning:arithmetic-between-different-enumeration-types-(-enum-hinic_node_id-and-enum-hinic_fault_err_level-)
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_api_cmd.c:warning:expecting-prototype-for-alloc_cmd_buf().-Prototype-was-for-alloc_resp_buf()-instead
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_api_cmd.c:warning:expecting-prototype-for-prepare_cell().-Prototype-was-for-wait_for_resp_polling()-instead
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_devlink.c:warning:variable-pdev-set-but-not-used
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_mbox.c:warning:expecting-prototype-for-hinic3_unregister_ppf_mbox_cb().-Prototype-was-for-hinic3_unregister_pf_mbox_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_mbox.c:warning:expecting-prototype-for-hinic3_unregister_ppf_mbox_cb().-Prototype-was-for-hinic3_unregister_ppf_to_pf_mbox_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_mgmt.c:warning:This-comment-starts-with-but-isn-t-a-kernel-doc-comment.-Refer-Documentation-doc-guide-kernel-doc.rst
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_mgmt.c:warning:expecting-prototype-for-hinic_pf_to_mgmt_free().-Prototype-was-for-hinic3_pf_to_mgmt_free()-instead
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_mgmt.c:warning:expecting-prototype-for-hinic_pf_to_mgmt_init().-Prototype-was-for-hinic3_pf_to_mgmt_init()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_common.c:warning:expecting-prototype-for-rnpm_enable_rx_buff().-Prototype-was-for-rnpm_enable_rx_buff_generic()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_common.c:warning:expecting-prototype-for-rnpm_update_mc_addr_list_generic().-Prototype-was-for-rnpm_update_mutiport_mc_addr_list_generic()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_debugfs.c:warning:expecting-prototype-for-rnpm_dbg_reg_ops_write().-Prototype-was-for-rnpm_dbg_phy_ops_write()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_ethtool.c:warning:no-previous-prototype-for-function-rnpm_get_phy_statistics
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-ixgbe_write_eitr().-Prototype-was-for-rnpm_write_eitr()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnp_irq_affinity_notify().-Prototype-was-for-rnpm_irq_affinity_notify()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnp_irq_affinity_release().-Prototype-was-for-rnpm_irq_affinity_release()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnp_is_non_eop().-Prototype-was-for-rnpm_is_non_eop()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnpm_set_ivar().-Prototype-was-for-rnpm_set_ring_vector()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_mbx_fw.c:warning:Cannot-understand-speed:
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_n10.c:warning:expecting-prototype-for-rnpm_atr_add_signature_filter_n10().-Prototype-was-for-rnpm_fdir_add_signature_filter_n10()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_sriov.c:warning:no-previous-prototype-for-function-rnpm_get_vf_ringnum
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_sriov.c:warning:no-previous-prototype-for-function-rnpm_setup_ring_maxrate
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_sriov.c:warning:variable-y-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:no-previous-prototype-for-function-rnpvf_get_ringparam
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:unused-variable-rnp_gstrings_test
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:variable-advertising-is-uninitialized-when-used-here
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:variable-advertising-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:variable-queue_idx-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_check_for_ack_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_check_for_msg_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_check_for_rst_msg_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_obtain_mbx_lock_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_poll_for_ack
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_poll_for_msg
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_read_mbx_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_read_posted_mbx
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_write_mbx_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_write_posted_mbx
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Excess-function-parameter-data-description-in-rnpvf_watchdog
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Excess-function-parameter-rx_ring-description-in-rnpvf_pull_tail
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Excess-function-parameter-skb-description-in-rnpvf_is_non_eop
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Function-parameter-or-member-rnpvf_msix_vector-not-described-in-rnpvf_set_ring_vector
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Function-parameter-or-member-rnpvf_queue-not-described-in-rnpvf_set_ring_vector
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Function-parameter-or-member-t-not-described-in-rnpvf_watchdog
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Function-parameter-or-member-type-not-described-in-rnpvf_update_itr
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:expecting-prototype-for-rnp_clean_rx_irq().-Prototype-was-for-rnpvf_clean_rx_irq()-instead
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:expecting-prototype-for-rnp_clean_rx_ring().-Prototype-was-for-rnpvf_clean_rx_ring()-instead
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:expecting-prototype-for-rnpvf_set_ivar().-Prototype-was-for-rnpvf_set_ring_vector()-instead
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:expecting-prototype-for-rnpvf_write_eitr().-Prototype-was-for-rnpvf_write_eitr_rx()-instead
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_alloc_rx_buffers
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_assign_netdev_ops
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_configure_rx_ring
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_configure_tx_ring
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_disable_rx_queue
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_enable_rx_queue
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_maybe_tx_ctxtdesc
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_tx_ctxtdesc
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_unmap_and_free_tx_resource
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_write_eitr_rx
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_xmit_frame_ring
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-update_rx_count
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-err-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-hw-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-ring_csum_err-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-ring_csum_good-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-vector_threshold-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-xdp_xmit-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:Excess-function-parameter-mac_addr-description-in-rnpvf_get_queues_vf
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-function-rnpvf_addr_list_itr
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-function-rnpvf_get_queues
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-function-rnpvf_negotiate_api_version
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-function-rnpvf_set_veb_mac_n10
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-function-rnpvf_set_vlan_n10
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:variable-number_of_queues-set-but-not-used
| |-- drivers-scsi-hisi_raid-hiraid_main.c:warning:expecting-prototype-for-hiraid_create_cq().-Prototype-was-for-hiraid_create_complete_queue()-instead
| `-- drivers-scsi-hisi_raid-hiraid_main.c:warning:expecting-prototype-for-hiraid_create_sq().-Prototype-was-for-hiraid_create_send_queue()-instead
|-- arm64-allyesconfig
| |-- drivers-net-ethernet-huawei-hinic-hinic_api_cmd.c:warning:expecting-prototype-for-prepare_cell().-Prototype-was-for-wait_for_resp_polling()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_cfg.c:warning:arithmetic-between-different-enumeration-types-(-enum-hinic_node_id-and-enum-hinic_fault_err_level-)
| |-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_aeq_register_sw_cb().-Prototype-was-for-hinic_aeq_register_swe_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_aeq_unregister_sw_cb().-Prototype-was-for-hinic_aeq_unregister_swe_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_ceq_register_sw_cb().-Prototype-was-for-hinic_ceq_register_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_hwdev.c:warning:arithmetic-between-different-enumeration-types-(-enum-hinic_node_id-and-enum-hinic_fault_err_level-)
| |-- drivers-net-ethernet-huawei-hinic-hinic_hwif.c:warning:This-comment-starts-with-but-isn-t-a-kernel-doc-comment.-Refer-Documentation-doc-guide-kernel-doc.rst
| |-- drivers-net-ethernet-huawei-hinic-hinic_mbox.c:warning:expecting-prototype-for-hinic_unregister_ppf_mbox_cb().-Prototype-was-for-hinic_unregister_pf_mbox_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_mbox.c:warning:expecting-prototype-for-hinic_unregister_ppf_mbox_cb().-Prototype-was-for-hinic_unregister_ppf_to_pf_mbox_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_mgmt.c:warning:This-comment-starts-with-but-isn-t-a-kernel-doc-comment.-Refer-Documentation-doc-guide-kernel-doc.rst
| |-- drivers-net-ethernet-huawei-hinic-hinic_nic_dbg.c:warning:arithmetic-between-different-enumeration-types-(-enum-hinic_node_id-and-enum-hinic_fault_err_level-)
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_api_cmd.c:warning:expecting-prototype-for-alloc_cmd_buf().-Prototype-was-for-alloc_resp_buf()-instead
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_api_cmd.c:warning:expecting-prototype-for-prepare_cell().-Prototype-was-for-wait_for_resp_polling()-instead
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_devlink.c:warning:variable-pdev-set-but-not-used
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_mbox.c:warning:expecting-prototype-for-hinic3_unregister_ppf_mbox_cb().-Prototype-was-for-hinic3_unregister_pf_mbox_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_mbox.c:warning:expecting-prototype-for-hinic3_unregister_ppf_mbox_cb().-Prototype-was-for-hinic3_unregister_ppf_to_pf_mbox_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_mgmt.c:warning:This-comment-starts-with-but-isn-t-a-kernel-doc-comment.-Refer-Documentation-doc-guide-kernel-doc.rst
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_mgmt.c:warning:expecting-prototype-for-hinic_pf_to_mgmt_free().-Prototype-was-for-hinic3_pf_to_mgmt_free()-instead
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_mgmt.c:warning:expecting-prototype-for-hinic_pf_to_mgmt_init().-Prototype-was-for-hinic3_pf_to_mgmt_init()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_common.c:warning:expecting-prototype-for-rnpm_enable_rx_buff().-Prototype-was-for-rnpm_enable_rx_buff_generic()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_common.c:warning:expecting-prototype-for-rnpm_update_mc_addr_list_generic().-Prototype-was-for-rnpm_update_mutiport_mc_addr_list_generic()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_debugfs.c:warning:expecting-prototype-for-rnpm_dbg_reg_ops_write().-Prototype-was-for-rnpm_dbg_phy_ops_write()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_ethtool.c:warning:no-previous-prototype-for-function-rnpm_get_phy_statistics
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-ixgbe_write_eitr().-Prototype-was-for-rnpm_write_eitr()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnp_irq_affinity_notify().-Prototype-was-for-rnpm_irq_affinity_notify()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnp_irq_affinity_release().-Prototype-was-for-rnpm_irq_affinity_release()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnp_is_non_eop().-Prototype-was-for-rnpm_is_non_eop()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnpm_set_ivar().-Prototype-was-for-rnpm_set_ring_vector()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_mbx_fw.c:warning:Cannot-understand-speed:
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_n10.c:warning:expecting-prototype-for-rnpm_atr_add_signature_filter_n10().-Prototype-was-for-rnpm_fdir_add_signature_filter_n10()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_sriov.c:warning:no-previous-prototype-for-function-rnpm_get_vf_ringnum
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_sriov.c:warning:no-previous-prototype-for-function-rnpm_setup_ring_maxrate
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_sriov.c:warning:variable-y-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:no-previous-prototype-for-function-rnpvf_get_ringparam
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:unused-variable-rnp_gstrings_test
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:variable-advertising-is-uninitialized-when-used-here
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:variable-advertising-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:variable-queue_idx-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_check_for_ack_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_check_for_msg_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_check_for_rst_msg_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_obtain_mbx_lock_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_poll_for_ack
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_poll_for_msg
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_read_mbx_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_read_posted_mbx
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_write_mbx_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_write_posted_mbx
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Excess-function-parameter-data-description-in-rnpvf_watchdog
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Excess-function-parameter-rx_ring-description-in-rnpvf_pull_tail
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Excess-function-parameter-skb-description-in-rnpvf_is_non_eop
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Function-parameter-or-member-rnpvf_msix_vector-not-described-in-rnpvf_set_ring_vector
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Function-parameter-or-member-rnpvf_queue-not-described-in-rnpvf_set_ring_vector
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Function-parameter-or-member-t-not-described-in-rnpvf_watchdog
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Function-parameter-or-member-type-not-described-in-rnpvf_update_itr
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:expecting-prototype-for-rnp_clean_rx_irq().-Prototype-was-for-rnpvf_clean_rx_irq()-instead
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:expecting-prototype-for-rnp_clean_rx_ring().-Prototype-was-for-rnpvf_clean_rx_ring()-instead
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:expecting-prototype-for-rnpvf_set_ivar().-Prototype-was-for-rnpvf_set_ring_vector()-instead
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:expecting-prototype-for-rnpvf_write_eitr().-Prototype-was-for-rnpvf_write_eitr_rx()-instead
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_alloc_rx_buffers
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_assign_netdev_ops
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_configure_rx_ring
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_configure_tx_ring
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_disable_rx_queue
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_enable_rx_queue
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_maybe_tx_ctxtdesc
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_tx_ctxtdesc
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_unmap_and_free_tx_resource
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_write_eitr_rx
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_xmit_frame_ring
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-update_rx_count
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-err-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-hw-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-ring_csum_err-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-ring_csum_good-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-vector_threshold-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-xdp_xmit-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:Excess-function-parameter-mac_addr-description-in-rnpvf_get_queues_vf
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-function-rnpvf_addr_list_itr
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-function-rnpvf_get_queues
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-function-rnpvf_negotiate_api_version
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-function-rnpvf_set_veb_mac_n10
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-function-rnpvf_set_vlan_n10
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:variable-number_of_queues-set-but-not-used
| |-- drivers-scsi-hisi_raid-hiraid_main.c:warning:expecting-prototype-for-hiraid_create_cq().-Prototype-was-for-hiraid_create_complete_queue()-instead
| |-- drivers-scsi-hisi_raid-hiraid_main.c:warning:expecting-prototype-for-hiraid_create_sq().-Prototype-was-for-hiraid_create_send_queue()-instead
| |-- ld.lld:error:duplicate-symbol:check_ari_mode
| |-- ld.lld:error:duplicate-symbol:mbx_cookie_zalloc
| |-- ld.lld:error:duplicate-symbol:register_mbx_irq
| |-- ld.lld:error:duplicate-symbol:remove_mbx_irq
| `-- ld.lld:error:duplicate-symbol:rnp10_netdev_ops
|-- x86_64-allmodconfig
| |-- drivers-net-ethernet-huawei-hinic-hinic_api_cmd.c:warning:expecting-prototype-for-prepare_cell().-Prototype-was-for-wait_for_resp_polling()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_aeq_register_sw_cb().-Prototype-was-for-hinic_aeq_register_swe_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_aeq_unregister_sw_cb().-Prototype-was-for-hinic_aeq_unregister_swe_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_ceq_register_sw_cb().-Prototype-was-for-hinic_ceq_register_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_hwif.c:warning:This-comment-starts-with-but-isn-t-a-kernel-doc-comment.-Refer-Documentation-doc-guide-kernel-doc.rst
| |-- drivers-net-ethernet-huawei-hinic-hinic_mbox.c:warning:expecting-prototype-for-hinic_unregister_ppf_mbox_cb().-Prototype-was-for-hinic_unregister_pf_mbox_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_mbox.c:warning:expecting-prototype-for-hinic_unregister_ppf_mbox_cb().-Prototype-was-for-hinic_unregister_ppf_to_pf_mbox_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_mgmt.c:warning:This-comment-starts-with-but-isn-t-a-kernel-doc-comment.-Refer-Documentation-doc-guide-kernel-doc.rst
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_api_cmd.c:warning:expecting-prototype-for-alloc_cmd_buf().-Prototype-was-for-alloc_resp_buf()-instead
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_api_cmd.c:warning:expecting-prototype-for-prepare_cell().-Prototype-was-for-wait_for_resp_polling()-instead
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_devlink.c:warning:variable-pdev-set-but-not-used
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_mbox.c:warning:expecting-prototype-for-hinic3_unregister_ppf_mbox_cb().-Prototype-was-for-hinic3_unregister_pf_mbox_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_mbox.c:warning:expecting-prototype-for-hinic3_unregister_ppf_mbox_cb().-Prototype-was-for-hinic3_unregister_ppf_to_pf_mbox_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_mgmt.c:warning:This-comment-starts-with-but-isn-t-a-kernel-doc-comment.-Refer-Documentation-doc-guide-kernel-doc.rst
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_mgmt.c:warning:expecting-prototype-for-hinic_pf_to_mgmt_free().-Prototype-was-for-hinic3_pf_to_mgmt_free()-instead
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_mgmt.c:warning:expecting-prototype-for-hinic_pf_to_mgmt_init().-Prototype-was-for-hinic3_pf_to_mgmt_init()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_common.c:warning:expecting-prototype-for-rnpm_enable_rx_buff().-Prototype-was-for-rnpm_enable_rx_buff_generic()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_common.c:warning:expecting-prototype-for-rnpm_update_mc_addr_list_generic().-Prototype-was-for-rnpm_update_mutiport_mc_addr_list_generic()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_debugfs.c:warning:expecting-prototype-for-rnpm_dbg_reg_ops_write().-Prototype-was-for-rnpm_dbg_phy_ops_write()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_ethtool.c:warning:no-previous-prototype-for-function-rnpm_get_phy_statistics
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-ixgbe_write_eitr().-Prototype-was-for-rnpm_write_eitr()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnp_irq_affinity_notify().-Prototype-was-for-rnpm_irq_affinity_notify()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnp_irq_affinity_release().-Prototype-was-for-rnpm_irq_affinity_release()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnp_is_non_eop().-Prototype-was-for-rnpm_is_non_eop()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnpm_set_ivar().-Prototype-was-for-rnpm_set_ring_vector()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_mbx_fw.c:warning:Cannot-understand-speed:
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_n10.c:warning:expecting-prototype-for-rnpm_atr_add_signature_filter_n10().-Prototype-was-for-rnpm_fdir_add_signature_filter_n10()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_sriov.c:warning:no-previous-prototype-for-function-rnpm_get_vf_ringnum
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_sriov.c:warning:no-previous-prototype-for-function-rnpm_setup_ring_maxrate
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_sriov.c:warning:variable-y-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:no-previous-prototype-for-function-rnpvf_get_ringparam
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:unused-variable-rnp_gstrings_test
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:variable-advertising-is-uninitialized-when-used-here
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:variable-advertising-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:variable-queue_idx-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_check_for_ack_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_check_for_msg_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_check_for_rst_msg_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_obtain_mbx_lock_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_poll_for_ack
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_poll_for_msg
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_read_mbx_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_read_posted_mbx
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_write_mbx_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_write_posted_mbx
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Excess-function-parameter-data-description-in-rnpvf_watchdog
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Excess-function-parameter-rx_ring-description-in-rnpvf_pull_tail
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Excess-function-parameter-skb-description-in-rnpvf_is_non_eop
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Function-parameter-or-member-rnpvf_msix_vector-not-described-in-rnpvf_set_ring_vector
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Function-parameter-or-member-rnpvf_queue-not-described-in-rnpvf_set_ring_vector
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Function-parameter-or-member-t-not-described-in-rnpvf_watchdog
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Function-parameter-or-member-type-not-described-in-rnpvf_update_itr
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:expecting-prototype-for-rnp_clean_rx_irq().-Prototype-was-for-rnpvf_clean_rx_irq()-instead
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:expecting-prototype-for-rnp_clean_rx_ring().-Prototype-was-for-rnpvf_clean_rx_ring()-instead
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:expecting-prototype-for-rnpvf_set_ivar().-Prototype-was-for-rnpvf_set_ring_vector()-instead
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:expecting-prototype-for-rnpvf_write_eitr().-Prototype-was-for-rnpvf_write_eitr_rx()-instead
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_alloc_rx_buffers
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_assign_netdev_ops
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_configure_rx_ring
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_configure_tx_ring
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_disable_rx_queue
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_enable_rx_queue
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_maybe_tx_ctxtdesc
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_tx_ctxtdesc
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_unmap_and_free_tx_resource
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_write_eitr_rx
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_xmit_frame_ring
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-update_rx_count
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-err-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-hw-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-ring_csum_err-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-ring_csum_good-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-vector_threshold-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-xdp_xmit-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:Excess-function-parameter-mac_addr-description-in-rnpvf_get_queues_vf
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-function-rnpvf_addr_list_itr
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-function-rnpvf_get_queues
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-function-rnpvf_negotiate_api_version
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-function-rnpvf_set_veb_mac_n10
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-function-rnpvf_set_vlan_n10
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:variable-number_of_queues-set-but-not-used
| |-- drivers-scsi-hisi_raid-hiraid_main.c:warning:expecting-prototype-for-hiraid_create_cq().-Prototype-was-for-hiraid_create_complete_queue()-instead
| `-- drivers-scsi-hisi_raid-hiraid_main.c:warning:expecting-prototype-for-hiraid_create_sq().-Prototype-was-for-hiraid_create_send_queue()-instead
|-- x86_64-allyesconfig
| |-- drivers-net-ethernet-huawei-hinic-hinic_api_cmd.c:warning:expecting-prototype-for-prepare_cell().-Prototype-was-for-wait_for_resp_polling()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_aeq_register_sw_cb().-Prototype-was-for-hinic_aeq_register_swe_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_aeq_unregister_sw_cb().-Prototype-was-for-hinic_aeq_unregister_swe_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_ceq_register_sw_cb().-Prototype-was-for-hinic_ceq_register_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_hwif.c:warning:This-comment-starts-with-but-isn-t-a-kernel-doc-comment.-Refer-Documentation-doc-guide-kernel-doc.rst
| |-- drivers-net-ethernet-huawei-hinic-hinic_mbox.c:warning:expecting-prototype-for-hinic_unregister_ppf_mbox_cb().-Prototype-was-for-hinic_unregister_pf_mbox_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_mbox.c:warning:expecting-prototype-for-hinic_unregister_ppf_mbox_cb().-Prototype-was-for-hinic_unregister_ppf_to_pf_mbox_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_mgmt.c:warning:This-comment-starts-with-but-isn-t-a-kernel-doc-comment.-Refer-Documentation-doc-guide-kernel-doc.rst
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_api_cmd.c:warning:expecting-prototype-for-alloc_cmd_buf().-Prototype-was-for-alloc_resp_buf()-instead
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_api_cmd.c:warning:expecting-prototype-for-prepare_cell().-Prototype-was-for-wait_for_resp_polling()-instead
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_devlink.c:warning:variable-pdev-set-but-not-used
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_mbox.c:warning:expecting-prototype-for-hinic3_unregister_ppf_mbox_cb().-Prototype-was-for-hinic3_unregister_pf_mbox_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_mbox.c:warning:expecting-prototype-for-hinic3_unregister_ppf_mbox_cb().-Prototype-was-for-hinic3_unregister_ppf_to_pf_mbox_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_mgmt.c:warning:This-comment-starts-with-but-isn-t-a-kernel-doc-comment.-Refer-Documentation-doc-guide-kernel-doc.rst
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_mgmt.c:warning:expecting-prototype-for-hinic_pf_to_mgmt_free().-Prototype-was-for-hinic3_pf_to_mgmt_free()-instead
| |-- drivers-net-ethernet-huawei-hinic3-hw-hinic3_mgmt.c:warning:expecting-prototype-for-hinic_pf_to_mgmt_init().-Prototype-was-for-hinic3_pf_to_mgmt_init()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_common.c:warning:expecting-prototype-for-rnpm_enable_rx_buff().-Prototype-was-for-rnpm_enable_rx_buff_generic()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_common.c:warning:expecting-prototype-for-rnpm_update_mc_addr_list_generic().-Prototype-was-for-rnpm_update_mutiport_mc_addr_list_generic()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_debugfs.c:warning:expecting-prototype-for-rnpm_dbg_reg_ops_write().-Prototype-was-for-rnpm_dbg_phy_ops_write()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_ethtool.c:warning:no-previous-prototype-for-function-rnpm_get_phy_statistics
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-ixgbe_write_eitr().-Prototype-was-for-rnpm_write_eitr()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnp_irq_affinity_notify().-Prototype-was-for-rnpm_irq_affinity_notify()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnp_irq_affinity_release().-Prototype-was-for-rnpm_irq_affinity_release()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnp_is_non_eop().-Prototype-was-for-rnpm_is_non_eop()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_main.c:warning:expecting-prototype-for-rnpm_set_ivar().-Prototype-was-for-rnpm_set_ring_vector()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_mbx_fw.c:warning:Cannot-understand-speed:
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_n10.c:warning:expecting-prototype-for-rnpm_atr_add_signature_filter_n10().-Prototype-was-for-rnpm_fdir_add_signature_filter_n10()-instead
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_sriov.c:warning:no-previous-prototype-for-function-rnpm_get_vf_ringnum
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_sriov.c:warning:no-previous-prototype-for-function-rnpm_setup_ring_maxrate
| |-- drivers-net-ethernet-mucse-rnpm-rnpm_sriov.c:warning:variable-y-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:no-previous-prototype-for-function-rnpvf_get_ringparam
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:unused-variable-rnp_gstrings_test
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:variable-advertising-is-uninitialized-when-used-here
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:variable-advertising-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-ethtool.c:warning:variable-queue_idx-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_check_for_ack_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_check_for_msg_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_check_for_rst_msg_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_obtain_mbx_lock_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_poll_for_ack
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_poll_for_msg
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_read_mbx_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_read_posted_mbx
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_write_mbx_vf
| |-- drivers-net-ethernet-mucse-rnpvf-mbx.c:warning:Function-parameter-or-member-to_cm3-not-described-in-rnpvf_write_posted_mbx
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Excess-function-parameter-data-description-in-rnpvf_watchdog
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Excess-function-parameter-rx_ring-description-in-rnpvf_pull_tail
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Excess-function-parameter-skb-description-in-rnpvf_is_non_eop
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Function-parameter-or-member-rnpvf_msix_vector-not-described-in-rnpvf_set_ring_vector
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Function-parameter-or-member-rnpvf_queue-not-described-in-rnpvf_set_ring_vector
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Function-parameter-or-member-t-not-described-in-rnpvf_watchdog
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:Function-parameter-or-member-type-not-described-in-rnpvf_update_itr
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:expecting-prototype-for-rnp_clean_rx_irq().-Prototype-was-for-rnpvf_clean_rx_irq()-instead
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:expecting-prototype-for-rnp_clean_rx_ring().-Prototype-was-for-rnpvf_clean_rx_ring()-instead
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:expecting-prototype-for-rnpvf_set_ivar().-Prototype-was-for-rnpvf_set_ring_vector()-instead
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:expecting-prototype-for-rnpvf_write_eitr().-Prototype-was-for-rnpvf_write_eitr_rx()-instead
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_alloc_rx_buffers
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_assign_netdev_ops
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_configure_rx_ring
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_configure_tx_ring
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_disable_rx_queue
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_enable_rx_queue
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_maybe_tx_ctxtdesc
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_tx_ctxtdesc
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_unmap_and_free_tx_resource
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_write_eitr_rx
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-rnpvf_xmit_frame_ring
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:no-previous-prototype-for-function-update_rx_count
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-err-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-hw-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-ring_csum_err-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-ring_csum_good-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-vector_threshold-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-rnpvf_main.c:warning:variable-xdp_xmit-set-but-not-used
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:Excess-function-parameter-mac_addr-description-in-rnpvf_get_queues_vf
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-function-rnpvf_addr_list_itr
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-function-rnpvf_get_queues
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-function-rnpvf_negotiate_api_version
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-function-rnpvf_set_veb_mac_n10
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:no-previous-prototype-for-function-rnpvf_set_vlan_n10
| |-- drivers-net-ethernet-mucse-rnpvf-vf.c:warning:variable-number_of_queues-set-but-not-used
| |-- drivers-scsi-hisi_raid-hiraid_main.c:warning:expecting-prototype-for-hiraid_create_cq().-Prototype-was-for-hiraid_create_complete_queue()-instead
| |-- drivers-scsi-hisi_raid-hiraid_main.c:warning:expecting-prototype-for-hiraid_create_sq().-Prototype-was-for-hiraid_create_send_queue()-instead
| |-- ld.lld:error:duplicate-symbol:__cfi_check_ari_mode
| |-- ld.lld:error:duplicate-symbol:__cfi_mbx_cookie_zalloc
| |-- ld.lld:error:duplicate-symbol:__cfi_nic_ioctl
| |-- ld.lld:error:duplicate-symbol:__cfi_register_mbx_irq
| |-- ld.lld:error:duplicate-symbol:__cfi_remove_mbx_irq
| |-- ld.lld:error:duplicate-symbol:__cfi_set_slave_host_enable
| |-- ld.lld:error:duplicate-symbol:check_ari_mode
| |-- ld.lld:error:duplicate-symbol:mbx_cookie_zalloc
| |-- ld.lld:error:duplicate-symbol:register_mbx_irq
| |-- ld.lld:error:duplicate-symbol:remove_mbx_irq
| `-- ld.lld:error:duplicate-symbol:rnp10_netdev_ops
|-- x86_64-randconfig-004-20240408
| |-- drivers-net-ethernet-huawei-hinic-hinic_api_cmd.c:warning:expecting-prototype-for-prepare_cell().-Prototype-was-for-wait_for_resp_polling()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_dcb.c:error:incomplete-definition-of-type-struct-ieee_ets
| |-- drivers-net-ethernet-huawei-hinic-hinic_dcb.c:error:incomplete-definition-of-type-struct-ieee_pfc
| |-- drivers-net-ethernet-huawei-hinic-hinic_dcb.c:error:invalid-application-of-sizeof-to-an-incomplete-type-struct-ieee_ets
| |-- drivers-net-ethernet-huawei-hinic-hinic_dcb.c:error:invalid-application-of-sizeof-to-an-incomplete-type-struct-ieee_pfc
| |-- drivers-net-ethernet-huawei-hinic-hinic_dcb.c:error:use-of-undeclared-identifier-DCB_ATTR_VALUE_UNDEFINED
| |-- drivers-net-ethernet-huawei-hinic-hinic_dcb.c:error:use-of-undeclared-identifier-DCB_CAP_DCBX_HOST
| |-- drivers-net-ethernet-huawei-hinic-hinic_dcb.c:error:use-of-undeclared-identifier-DCB_CAP_DCBX_VER_CEE
| |-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_aeq_register_sw_cb().-Prototype-was-for-hinic_aeq_register_swe_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_aeq_unregister_sw_cb().-Prototype-was-for-hinic_aeq_unregister_swe_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_ceq_register_sw_cb().-Prototype-was-for-hinic_ceq_register_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_hwif.c:warning:This-comment-starts-with-but-isn-t-a-kernel-doc-comment.-Refer-Documentation-doc-guide-kernel-doc.rst
| |-- drivers-net-ethernet-huawei-hinic-hinic_main.c:error:no-member-named-dcbnl_ops-in-struct-net_device
| |-- drivers-net-ethernet-huawei-hinic-hinic_mbox.c:warning:expecting-prototype-for-hinic_unregister_ppf_mbox_cb().-Prototype-was-for-hinic_unregister_pf_mbox_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_mbox.c:warning:expecting-prototype-for-hinic_unregister_ppf_mbox_cb().-Prototype-was-for-hinic_unregister_ppf_to_pf_mbox_cb()-instead
| |-- drivers-net-ethernet-huawei-hinic-hinic_mgmt.c:warning:This-comment-starts-with-but-isn-t-a-kernel-doc-comment.-Refer-Documentation-doc-guide-kernel-doc.rst
| |-- drivers-net-ethernet-huawei-hinic-hinic_nic_dev.h:error:field-has-incomplete-type-struct-ieee_ets
| `-- drivers-net-ethernet-huawei-hinic-hinic_nic_dev.h:error:field-has-incomplete-type-struct-ieee_pfc
`-- x86_64-randconfig-012-20240408
|-- drivers-net-ethernet-huawei-hinic-hinic_api_cmd.c:warning:expecting-prototype-for-prepare_cell().-Prototype-was-for-wait_for_resp_polling()-instead
|-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_aeq_register_sw_cb().-Prototype-was-for-hinic_aeq_register_swe_cb()-instead
|-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_aeq_unregister_sw_cb().-Prototype-was-for-hinic_aeq_unregister_swe_cb()-instead
|-- drivers-net-ethernet-huawei-hinic-hinic_eqs.c:warning:expecting-prototype-for-hinic_ceq_register_sw_cb().-Prototype-was-for-hinic_ceq_register_cb()-instead
|-- drivers-net-ethernet-huawei-hinic-hinic_hwif.c:warning:This-comment-starts-with-but-isn-t-a-kernel-doc-comment.-Refer-Documentation-doc-guide-kernel-doc.rst
|-- drivers-net-ethernet-huawei-hinic-hinic_lld.c:error:use-of-undeclared-identifier-disable_vf_load
|-- drivers-net-ethernet-huawei-hinic-hinic_main.c:error:call-to-undeclared-function-vlan_dev_priv-ISO-C99-and-later-do-not-support-implicit-function-declarations
|-- drivers-net-ethernet-huawei-hinic-hinic_main.c:error:member-reference-type-int-is-not-a-pointer
|-- drivers-net-ethernet-huawei-hinic-hinic_mbox.c:warning:expecting-prototype-for-hinic_unregister_ppf_mbox_cb().-Prototype-was-for-hinic_unregister_pf_mbox_cb()-instead
|-- drivers-net-ethernet-huawei-hinic-hinic_mbox.c:warning:expecting-prototype-for-hinic_unregister_ppf_mbox_cb().-Prototype-was-for-hinic_unregister_ppf_to_pf_mbox_cb()-instead
`-- drivers-net-ethernet-huawei-hinic-hinic_mgmt.c:warning:This-comment-starts-with-but-isn-t-a-kernel-doc-comment.-Refer-Documentation-doc-guide-kernel-doc.rst
elapsed time: 724m
configs tested: 41
configs skipped: 141
tested configs:
arm64 allmodconfig clang
arm64 allnoconfig gcc
arm64 defconfig gcc
arm64 randconfig-001-20240408 clang
arm64 randconfig-002-20240408 clang
arm64 randconfig-003-20240408 gcc
arm64 randconfig-004-20240408 gcc
loongarch alldefconfig gcc
loongarch allmodconfig gcc
loongarch allnoconfig gcc
loongarch defconfig gcc
loongarch randconfig-001-20240408 gcc
loongarch randconfig-002-20240408 gcc
x86_64 allnoconfig clang
x86_64 allyesconfig clang
x86_64 buildonly-randconfig-001-20240408 clang
x86_64 buildonly-randconfig-002-20240408 clang
x86_64 buildonly-randconfig-003-20240408 clang
x86_64 buildonly-randconfig-004-20240408 clang
x86_64 buildonly-randconfig-005-20240408 clang
x86_64 buildonly-randconfig-006-20240408 clang
x86_64 defconfig gcc
x86_64 randconfig-001-20240408 gcc
x86_64 randconfig-002-20240408 clang
x86_64 randconfig-003-20240408 clang
x86_64 randconfig-004-20240408 clang
x86_64 randconfig-005-20240408 clang
x86_64 randconfig-006-20240408 gcc
x86_64 randconfig-011-20240408 clang
x86_64 randconfig-012-20240408 clang
x86_64 randconfig-013-20240408 gcc
x86_64 randconfig-014-20240408 clang
x86_64 randconfig-015-20240408 clang
x86_64 randconfig-016-20240408 clang
x86_64 randconfig-071-20240408 gcc
x86_64 randconfig-072-20240408 clang
x86_64 randconfig-073-20240408 clang
x86_64 randconfig-074-20240408 gcc
x86_64 randconfig-075-20240408 gcc
x86_64 randconfig-076-20240408 clang
x86_64 rhel-8.3-rust clang
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

[openeuler:OLK-6.6 6900/7311] drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c:475:60: sparse: sparse: incorrect type in argument 1 (different base types)
by kernel test robot 09 Apr '24
tree: https://gitee.com/openeuler/kernel.git OLK-6.6
head: 05607873db411ec3c614313b43cec60138c26a99
commit: 676df2864a908565710282838af4f392acb9ebd4 [6900/7311] net: hns3: add command queue trace for hns3
config: loongarch-randconfig-r113-20240408 (https://download.01.org/0day-ci/archive/20240409/202404090328.GBESI86e-lkp@…)
compiler: loongarch64-linux-gcc (GCC) 13.2.0
reproduce: (https://download.01.org/0day-ci/archive/20240409/202404090328.GBESI86e-lkp@…)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp(a)intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202404090328.GBESI86e-lkp@intel.com/
sparse warnings: (new ones prefixed by >>)
>> drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c:475:60: sparse: sparse: incorrect type in argument 1 (different base types) @@ expected unsigned short [usertype] opcode @@ got restricted __le16 [usertype] opcode @@
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c:475:60: sparse: expected unsigned short [usertype] opcode
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c:475:60: sparse: got restricted __le16 [usertype] opcode
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c: note: in included file (through include/linux/mmzone.h, include/linux/gfp.h, include/linux/slab.h, ...):
include/linux/page-flags.h:245:46: sparse: sparse: self-comparison always evaluates to false
--
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c: note: in included file (through include/trace/trace_events.h, include/trace/define_trace.h, ...):
>> drivers/net/ethernet/hisilicon/hns3/hns3pf/./hclge_trace.h:132:1: sparse: sparse: cast to restricted __le32
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c: note: in included file (through include/trace/perf.h, include/trace/define_trace.h, drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h):
>> drivers/net/ethernet/hisilicon/hns3/hns3pf/./hclge_trace.h:132:1: sparse: sparse: cast to restricted __le32
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c: note: in included file (through include/linux/mmzone.h, include/linux/gfp.h, include/linux/xarray.h, ...):
include/linux/page-flags.h:245:46: sparse: sparse: self-comparison always evaluates to false
vim +475 drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
462
463 /**
464 * hclge_comm_cmd_send - send command to command queue
465 * @hw: pointer to the hw struct
466 * @desc: prefilled descriptor for describing the command
467 * @num : the number of descriptors to be sent
468 *
469 * This is the main send command for command queue, it
470 * sends the queue, cleans the queue, etc
471 **/
472 int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
473 int num)
474 {
> 475 bool is_special = hclge_comm_is_special_opcode(desc->opcode);
476 struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
477 int ret;
478 int ntc;
479
480 if (hw->cmq.ops.trace_cmd_send)
481 hw->cmq.ops.trace_cmd_send(hw, desc, num, is_special);
482
483 spin_lock_bh(&hw->cmq.csq.lock);
484
485 if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state)) {
486 spin_unlock_bh(&hw->cmq.csq.lock);
487 return -EBUSY;
488 }
489
490 if (num > hclge_comm_ring_space(&hw->cmq.csq)) {
491 /* If CMDQ ring is full, SW HEAD and HW HEAD may be different,
492 * need update the SW HEAD pointer csq->next_to_clean
493 */
494 csq->next_to_clean =
495 hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
496 spin_unlock_bh(&hw->cmq.csq.lock);
497 return -EBUSY;
498 }
499
500 /**
501 * Record the location of desc in the ring for this time
502 * which will be use for hardware to write back
503 */
504 ntc = hw->cmq.csq.next_to_use;
505
506 hclge_comm_cmd_copy_desc(hw, desc, num);
507
508 /* Write to hardware */
509 hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG,
510 hw->cmq.csq.next_to_use);
511
512 ret = hclge_comm_cmd_check_result(hw, desc, num, ntc);
513
514 spin_unlock_bh(&hw->cmq.csq.lock);
515
516 if (hw->cmq.ops.trace_cmd_get)
517 hw->cmq.ops.trace_cmd_get(hw, desc, num, is_special);
518
519 return ret;
520 }
521
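The sparse complaint at line 475 is an endianness mismatch: desc->opcode is a
little-endian __le16, while hclge_comm_is_special_opcode() takes a CPU-order
u16. A minimal sketch of the usual fix (an assumption, not a patch from this
thread) converts the field at the call site:

	/* desc->opcode is __le16 on the wire; convert before the u16 helper */
	bool is_special = hclge_comm_is_special_opcode(le16_to_cpu(desc->opcode));

le16_to_cpu() compiles to a no-op on little-endian hosts, so this only changes
generated code on big-endian builds.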
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

[openeuler:OLK-6.6 6892/7311] drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c:392:58: sparse: sparse: incorrect type in argument 2 (different base types)
by kernel test robot 09 Apr '24
tree: https://gitee.com/openeuler/kernel.git OLK-6.6
head: 05607873db411ec3c614313b43cec60138c26a99
commit: 285f988e9d3a42d5aebb6c5012937771a7dbd4f7 [6892/7311] net: hns3: add queue bonding mode support for VF
config: loongarch-randconfig-r113-20240408 (https://download.01.org/0day-ci/archive/20240409/202404090038.WzHqywzr-lkp@…)
compiler: loongarch64-linux-gcc (GCC) 13.2.0
reproduce: (https://download.01.org/0day-ci/archive/20240409/202404090038.WzHqywzr-lkp@…)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp(a)intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202404090038.WzHqywzr-lkp@intel.com/
sparse warnings: (new ones prefixed by >>)
>> drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c:392:58: sparse: sparse: incorrect type in argument 2 (different base types) @@ expected unsigned short [usertype] qb_state @@ got restricted __le16 [usertype] @@
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c:392:58: sparse: expected unsigned short [usertype] qb_state
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c:392:58: sparse: got restricted __le16 [usertype]
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c: note: in included file (through include/linux/mmzone.h, include/linux/gfp.h, include/linux/xarray.h, ...):
include/linux/page-flags.h:245:46: sparse: sparse: self-comparison always evaluates to false
vim +392 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
312
313 void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
314 {
315 struct hclge_mbx_port_base_vlan *vlan_info;
316 struct hclge_mbx_link_status *link_info;
317 struct hclge_mbx_link_mode *link_mode;
318 enum hnae3_reset_type reset_type;
319 u16 link_status, state;
320 __le16 *msg_q;
321 u16 opcode;
322 u8 duplex;
323 u32 speed;
324 u32 tail;
325 u8 flag;
326 u16 idx;
327
328 tail = hdev->arq.tail;
329
330 /* process all the async queue messages */
331 while (tail != hdev->arq.head) {
332 if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
333 &hdev->hw.hw.comm_state)) {
334 dev_info(&hdev->pdev->dev,
335 "vf crq need init in async\n");
336 return;
337 }
338
339 msg_q = hdev->arq.msg_q[hdev->arq.head];
340 opcode = le16_to_cpu(msg_q[0]);
341 switch (opcode) {
342 case HCLGE_MBX_LINK_STAT_CHANGE:
343 link_info = (struct hclge_mbx_link_status *)(msg_q + 1);
344 link_status = le16_to_cpu(link_info->link_status);
345 speed = le32_to_cpu(link_info->speed);
346 duplex = (u8)le16_to_cpu(link_info->duplex);
347 flag = link_info->flag;
348
349 /* update upper layer with new link link status */
350 hclgevf_update_speed_duplex(hdev, speed, duplex);
351 hclgevf_update_link_status(hdev, link_status);
352
353 if (flag & HCLGE_MBX_PUSH_LINK_STATUS_EN)
354 set_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS,
355 &hdev->state);
356
357 break;
358 case HCLGE_MBX_LINK_STAT_MODE:
359 link_mode = (struct hclge_mbx_link_mode *)(msg_q + 1);
360 idx = le16_to_cpu(link_mode->idx);
361 if (idx)
362 hdev->hw.mac.supported =
363 le64_to_cpu(link_mode->link_mode);
364 else
365 hdev->hw.mac.advertising =
366 le64_to_cpu(link_mode->link_mode);
367 break;
368 case HCLGE_MBX_ASSERTING_RESET:
369 /* PF has asserted reset hence VF should go in pending
370 * state and poll for the hardware reset status till it
371 * has been completely reset. After this stack should
372 * eventually be re-initialized.
373 */
374 reset_type =
375 (enum hnae3_reset_type)le16_to_cpu(msg_q[1]);
376 set_bit(reset_type, &hdev->reset_pending);
377 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
378 hclgevf_reset_task_schedule(hdev);
379
380 break;
381 case HCLGE_MBX_PUSH_VLAN_INFO:
382 vlan_info =
383 (struct hclge_mbx_port_base_vlan *)(msg_q + 1);
384 state = le16_to_cpu(vlan_info->state);
385 hclgevf_update_port_base_vlan_info(hdev, state,
386 vlan_info);
387 break;
388 case HCLGE_MBX_PUSH_PROMISC_INFO:
389 hclgevf_parse_promisc_info(hdev, le16_to_cpu(msg_q[1]));
390 break;
391 case HCLGE_MBX_PUSH_QB_STATE:
> 392 hclgevf_parse_qb_info(hdev, msg_q[1]);
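msg_q is declared as __le16 *, so passing msg_q[1] straight to
hclgevf_parse_qb_info() hands a raw little-endian value to a function that
expects a CPU-order u16. A minimal sketch of the conventional fix (assumed,
mirroring the conversions in the neighbouring cases of this switch):

	case HCLGE_MBX_PUSH_QB_STATE:
		/* msg_q[] holds __le16 values; convert like the other cases do */
		hclgevf_parse_qb_info(hdev, le16_to_cpu(msg_q[1]));
		break;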
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

[openeuler:openEuler-1.0-LTS] BUILD REGRESSION 1ed85cbd67db3ccd721e15f7459154e8daec22a8
by kernel test robot 08 Apr '24
tree/branch: https://gitee.com/openeuler/kernel.git openEuler-1.0-LTS
branch HEAD: 1ed85cbd67db3ccd721e15f7459154e8daec22a8 !5755 sh: push-switch: Reorder cleanup operations to avoid use-after-free bug
Unverified Error/Warning (likely false positive, please contact us if interested):
arch/arm64/kernel/paravirt.c:40:1: sparse: sparse: symbol '__pcpu_scope_pvsched_vcpu_region' was not declared. Should it be static?
sound/drivers/opl4/opl4_synth.o: warning: objtool: missing symbol for section .text
sound/firewire/bebob/bebob_proc.o: warning: objtool: missing symbol for section .text
Error/Warning ids grouped by kconfigs:
gcc_recent_errors
|-- arm64-allmodconfig
| |-- drivers-dma-pl330.c:warning:dst-may-be-used-uninitialized
| `-- drivers-dma-pl330.c:warning:src-may-be-used-uninitialized
|-- arm64-defconfig
| |-- drivers-dma-pl330.c:warning:dst-may-be-used-uninitialized
| `-- drivers-dma-pl330.c:warning:src-may-be-used-uninitialized
|-- arm64-randconfig-002-20240408
| |-- drivers-dma-pl330.c:warning:dst-may-be-used-uninitialized
| `-- drivers-dma-pl330.c:warning:src-may-be-used-uninitialized
|-- arm64-randconfig-004-20240408
| |-- drivers-dma-pl330.c:warning:dst-may-be-used-uninitialized
| `-- drivers-dma-pl330.c:warning:src-may-be-used-uninitialized
|-- arm64-randconfig-r111-20240331
| |-- :Error:immediate-out-of-range-at-operand-bic-w0-w1
| |-- drivers-isdn-hisax-st5481_usb.c:sparse:sparse:incorrect-type-in-argument-(different-base-types)-expected-unsigned-int-epaddr-got-restricted-__le16-usertype-wIndex
| |-- drivers-net-xen-netback-interface.c:sparse:sparse:dubious:x-y
| |-- drivers-scsi-osd-osd_initiator.c:sparse:sparse:incorrect-type-in-argument-(different-base-types)-expected-unsigned-long-long-usertype-val-got-restricted-osd_id-const-usertype-id
| |-- drivers-scsi-osd-osd_initiator.c:sparse:sparse:incorrect-type-in-argument-(different-base-types)-expected-unsigned-long-long-usertype-val-got-restricted-osd_id-const-usertype-partition
| |-- drivers-scsi-osd-osd_initiator.c:sparse:sparse:incorrect-type-in-assignment-(different-base-types)-expected-restricted-osd_id-usertype-id-got-unsigned-long-long
| |-- drivers-scsi-osd-osd_initiator.c:sparse:sparse:incorrect-type-in-assignment-(different-base-types)-expected-restricted-osd_id-usertype-partition-got-unsigned-long-long
| |-- drivers-staging-gmjstcm-tcm_tis_spi.c:sparse:sparse:dereference-of-noderef-expression
| |-- drivers-staging-gmjstcm-tcm_tis_spi.c:sparse:sparse:symbol-tcm_tis_spi_transfer-was-not-declared.-Should-it-be-static
| |-- mm-pin_mem.c:sparse:sparse:not-addressable
| |-- mm-pin_mem.c:sparse:sparse:symbol-calculate_pin_mem_digest-was-not-declared.-Should-it-be-static
| |-- mm-pin_mem.c:sparse:sparse:symbol-check_redirect_end_valid-was-not-declared.-Should-it-be-static
| |-- mm-pin_mem.c:sparse:sparse:symbol-collect_normal_pages-was-not-declared.-Should-it-be-static
| |-- mm-pin_mem.c:sparse:sparse:symbol-collect_pmd_huge_pages-was-not-declared.-Should-it-be-static
| |-- mm-pin_mem.c:sparse:sparse:symbol-free_pin_pages-was-not-declared.-Should-it-be-static
| |-- mm-pin_mem.c:sparse:sparse:symbol-free_user_map_pages-was-not-declared.-Should-it-be-static
| |-- mm-pin_mem.c:sparse:sparse:symbol-max_pin_pid_num-was-not-declared.-Should-it-be-static
| |-- mm-pin_mem.c:sparse:sparse:symbol-redirect_space_size-was-not-declared.-Should-it-be-static
| |-- mm-pin_mem.c:sparse:sparse:symbol-remap_huge_pmd_pages-was-not-declared.-Should-it-be-static
| |-- mm-pin_mem.c:sparse:sparse:symbol-remap_normal_pages-was-not-declared.-Should-it-be-static
| `-- mm-pin_mem.c:sparse:sparse:symbol-reserve_page_from_buddy-was-not-declared.-Should-it-be-static
|-- arm64-randconfig-r113-20240401
| `-- arch-arm64-kernel-paravirt.c:sparse:sparse:symbol-__pcpu_scope_pvsched_vcpu_region-was-not-declared.-Should-it-be-static
`-- arm64-randconfig-r133-20240401
|-- drivers-clocksource-jcore-pit.c:sparse:sparse:incorrect-type-in-initializer-(different-address-spaces)-expected-void-const-noderef-asn-got-void
|-- drivers-connector-cn_proc.c:sparse:sparse:incorrect-type-in-initializer-(different-address-spaces)-expected-unsigned-int-noderef-asn-got-unsigned-int
|-- drivers-crypto-picoxcell_crypto.c:sparse:sparse:Variable-length-array-is-used.
|-- drivers-firmware-google-coreboot_table.c:sparse:sparse:cast-removes-address-space-asn-of-expression
|-- drivers-firmware-google-coreboot_table.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-void-const-volatile-noderef-asn-got-void-assigned-ptr_entry
|-- drivers-firmware-google-memconsole-coreboot.c:sparse:sparse:dereference-of-noderef-expression
|-- drivers-firmware-google-memconsole-coreboot.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-void-addr-got-struct-cbmem_cons-noderef-asn-static-toplevel-cbmem_console
|-- drivers-firmware-google-memconsole-coreboot.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-void-addr-got-struct-cbmem_cons-noderef-asn-tmp_cbmc
|-- drivers-firmware-google-memconsole-coreboot.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-void-const-from-got-unsigned-char-noderef-asn
|-- drivers-firmware-google-memconsole-coreboot.c:sparse:sparse:incorrect-type-in-assignment-(different-address-spaces)-expected-struct-cbmem_cons-noderef-asn-static-toplevel-cbmem_console-got-void
|-- drivers-firmware-google-memconsole-coreboot.c:sparse:sparse:incorrect-type-in-assignment-(different-address-spaces)-expected-struct-cbmem_cons-noderef-asn-tmp_cbmc-got-void
|-- drivers-firmware-meson-meson_sm.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-void-const-got-void-noderef-asn-static-toplevel-sm_shmem_out_base
|-- drivers-firmware-meson-meson_sm.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-void-got-void-noderef-asn-static-toplevel-sm_shmem_in_base
|-- drivers-irqchip-irq-gic.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-void-noderef-asn-got-void-noderef-asn-noderef-asn-percpu_base
|-- drivers-irqchip-irq-gic.c:sparse:sparse:incorrect-type-in-initializer-(different-address-spaces)-expected-void-const-noderef-asn-got-void-noderef-asn-noderef-asn
|-- drivers-isdn-i4l-isdn_net.c:sparse:sparse:incorrect-type-in-assignment-(different-address-spaces)-expected-struct-in_device-in_dev-got-struct-in_device-noderef-asn-ip_ptr
|-- drivers-spi-spi-bcm63xx.c:sparse:sparse:cast-removes-address-space-asn-of-expression
|-- drivers-spi-spi-bcm63xx.c:sparse:sparse:incorrect-type-in-assignment-(different-address-spaces)-expected-unsigned-char-const-noderef-usertype-asn-rx_io-got-unsigned-char-const-usertype
|-- drivers-spi-spi-bcm63xx.c:sparse:sparse:incorrect-type-in-assignment-(different-address-spaces)-expected-unsigned-char-noderef-usertype-asn-tx_io-got-unsigned-char-usertype
|-- drivers-spi-spi-lp8841-rtc.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-void-const-volatile-noderef-asn-addr-got-void-iomem
|-- drivers-spi-spi-lp8841-rtc.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-void-volatile-noderef-asn-addr-got-void-iomem
|-- drivers-spi-spi-lp8841-rtc.c:sparse:sparse:incorrect-type-in-assignment-(different-address-spaces)-expected-void-iomem-got-void-noderef-asn
|-- drivers-staging-gmjstcm-tcm_tis_spi.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-void-const-volatile-noderef-asn-addr-got-void-static-noderef-toplevel-asn-reuse_conf_
|-- drivers-staging-gmjstcm-tcm_tis_spi.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-void-volatile-noderef-asn-addr-got-void
|-- drivers-staging-gmjstcm-tcm_tis_spi.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-void-volatile-noderef-asn-addr-got-void-static-noderef-toplevel-asn-gpio1_a5
|-- drivers-staging-gmjstcm-tcm_tis_spi.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-void-volatile-noderef-asn-addr-got-void-static-noderef-toplevel-asn-reuse_conf_reg
|-- drivers-staging-gmjstcm-tcm_tis_spi.c:sparse:sparse:incorrect-type-in-assignment-(different-address-spaces)-expected-void-static-noderef-toplevel-asn-gpio1_a5-got-void-noderef-asn
|-- drivers-staging-gmjstcm-tcm_tis_spi.c:sparse:sparse:incorrect-type-in-assignment-(different-address-spaces)-expected-void-static-noderef-toplevel-asn-reuse_conf_reg-got-void-noderef-asn
|-- drivers-staging-vc04_services-interface-vchiq_arm-vchiq_arm.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-void-const-noderef-asn-from-got-struct-vchiq_element-const-ad
|-- drivers-staging-vc04_services-interface-vchiq_arm-vchiq_arm.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-void-const-noderef-asn-from-got-void-const
|-- drivers-w1-slaves-w1_ds28e04.c:sparse:sparse:incorrect-type-in-initializer-(different-address-spaces)-expected-char-const-noderef-asn-got-char-const-buf
|-- drivers-w1-slaves-w1_ds28e04.c:sparse:sparse:incorrect-type-in-initializer-(different-address-spaces)-expected-char-noderef-asn-got-char-buf
|-- fs-proc-etmem_scan.c:sparse:sparse:incorrect-type-in-assignment-(different-address-spaces)-expected-void-noderef-asn-buf-got-void-buf
|-- kernel-events-core.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-void-noderef-asn-got-int-noderef-pmu_disable_count
|-- kernel-events-core.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-void-noderef-asn-got-struct-perf_cpu_context-noderef-pmu_cpu_context
|-- kernel-events-core.c:sparse:sparse:incorrect-type-in-initializer-(different-address-spaces)-expected-int-noderef-asn-got-int
|-- kernel-events-core.c:sparse:sparse:incorrect-type-in-initializer-(different-address-spaces)-expected-void-const-noderef-asn-got-int
|-- kernel-events-core.c:sparse:sparse:incorrect-type-in-initializer-(different-address-spaces)-expected-void-const-noderef-asn-got-struct-perf_cpu_context
|-- kernel-printk-printk.c:sparse:sparse:incorrect-type-in-initializer-(different-address-spaces)-expected-int-noderef-asn-got-int
|-- kernel-trace-trace.c:sparse:sparse:incorrect-type-in-initializer-(different-address-spaces)-expected-int-noderef-asn-got-int
|-- mm-vmstat.c:sparse:sparse:incorrect-type-in-initializer-(different-address-spaces)-expected-signed-char-noderef-usertype-asn-got-signed-char
`-- net-openvswitch-actions.c:sparse:sparse:incorrect-type-in-initializer-(different-address-spaces)-expected-int-noderef-asn-got-int
clang_recent_errors
|-- x86_64-buildonly-randconfig-002-20240408
| `-- drivers-media-usb-dvb-usb-v2-gl861.o:warning:objtool:friio_power_ctrl-falls-through-to-next-function-friio_frontend_attach()
|-- x86_64-buildonly-randconfig-003-20240408
| |-- sound-drivers-opl4-opl4_synth.o:warning:objtool:missing-symbol-for-section-.text
| `-- sound-firewire-bebob-bebob_proc.o:warning:objtool:missing-symbol-for-section-.text
`-- x86_64-randconfig-121-20240408
|-- drivers-gpu-drm-radeon-radeon_bios.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-void-const-src-got-unsigned-char-noderef-usertype-asn-assigned-bios
|-- drivers-pci-controller-hisi-pcie-customer-hisi_pcie_cae.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-void-noderef-asn-to-got-void
|-- drivers-pci-rom.c:sparse:sparse:incorrect-type-in-return-expression-(different-address-spaces)-expected-void-noderef-asn-got-void
|-- fs-io_uring.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-long-long-noderef-usertype-asn-off_in-got-long-long-usertype-assigned-poff_in
|-- fs-io_uring.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-long-long-noderef-usertype-asn-off_out-got-long-long-usertype-assigned-poff_out
|-- fs-io_uring.c:sparse:sparse:incorrect-type-in-assignment-(different-address-spaces)-expected-struct-file-assigned-file-got-struct-file-noderef-asn
|-- fs-io_uring.c:sparse:sparse:incorrect-type-in-return-expression-(different-address-spaces)-expected-void-noderef-asn-got-struct-io_buffer-assigned-kbuf
|-- kernel-events-core.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-void-noderef-asn-got-int-noderef-pmu_disable_count
|-- kernel-events-core.c:sparse:sparse:incorrect-type-in-argument-(different-address-spaces)-expected-void-noderef-asn-got-struct-perf_cpu_context-noderef-pmu_cpu_context
|-- kernel-events-core.c:sparse:sparse:incorrect-type-in-initializer-(different-address-spaces)-expected-void-const-noderef-asn-got-int
`-- kernel-events-core.c:sparse:sparse:incorrect-type-in-initializer-(different-address-spaces)-expected-void-const-noderef-asn-got-struct-perf_cpu_context
elapsed time: 723m
configs tested: 36
configs skipped: 147
tested configs:
arm64 alldefconfig gcc
arm64 allmodconfig gcc
arm64 allnoconfig gcc
arm64 defconfig gcc
arm64 randconfig-001-20240408 gcc
arm64 randconfig-002-20240408 gcc
arm64 randconfig-003-20240408 gcc
arm64 randconfig-004-20240408 gcc
x86_64 allnoconfig clang
x86_64 allyesconfig clang
x86_64 buildonly-randconfig-001-20240408 clang
x86_64 buildonly-randconfig-002-20240408 clang
x86_64 buildonly-randconfig-003-20240408 clang
x86_64 buildonly-randconfig-004-20240408 clang
x86_64 buildonly-randconfig-005-20240408 clang
x86_64 buildonly-randconfig-006-20240408 clang
x86_64 defconfig gcc
x86_64 randconfig-001-20240408 gcc
x86_64 randconfig-002-20240408 clang
x86_64 randconfig-003-20240408 clang
x86_64 randconfig-004-20240408 clang
x86_64 randconfig-005-20240408 clang
x86_64 randconfig-006-20240408 gcc
x86_64 randconfig-011-20240408 clang
x86_64 randconfig-012-20240408 clang
x86_64 randconfig-013-20240408 gcc
x86_64 randconfig-014-20240408 clang
x86_64 randconfig-015-20240408 clang
x86_64 randconfig-016-20240408 clang
x86_64 randconfig-071-20240408 gcc
x86_64 randconfig-072-20240408 clang
x86_64 randconfig-073-20240408 clang
x86_64 randconfig-074-20240408 gcc
x86_64 randconfig-075-20240408 gcc
x86_64 randconfig-076-20240408 clang
x86_64 rhel-8.3-rust clang
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
From: Jingxian He <hejingxian(a)huawei.com>
Add CVM feature patches:
1. Add the CVM host feature.
2. Enable PMU physical IRQ injection for CVMs.
3. Add the bounce buffer feature for CVM guests (see the sketch below).
4. Add LPI support for CVM guests.
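A minimal sketch of how item 3 usually takes effect on the guest side
(assumed; is_cvm_world() is a hypothetical detection helper, and the real
hook lives in the CVM guest patch):

	/* early arch setup: route DMA through swiotlb bounce buffers
	 * whenever the kernel detects it is running as a CVM guest
	 */
	if (is_cvm_world())
		swiotlb_force = SWIOTLB_FORCE;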
arch/arm64/configs/defconfig | 2 +
arch/arm64/configs/openeuler_defconfig | 2 +
arch/arm64/include/asm/cvm_guest.h | 21 +
arch/arm64/include/asm/kvm_emulate.h | 14 +
arch/arm64/include/asm/kvm_host.h | 12 +
arch/arm64/include/asm/kvm_tmi.h | 376 +++++++++++
arch/arm64/include/asm/kvm_tmm.h | 72 +++
arch/arm64/kvm/Kconfig | 16 +
arch/arm64/kvm/Makefile | 5 +
arch/arm64/kvm/arch_timer.c | 104 +++-
arch/arm64/kvm/arm.c | 155 ++++-
arch/arm64/kvm/cvm.c | 824 +++++++++++++++++++++++++
arch/arm64/kvm/cvm_exit.c | 229 +++++++
arch/arm64/kvm/cvm_guest.c | 90 +++
arch/arm64/kvm/guest.c | 8 +
arch/arm64/kvm/hyp/vgic-v3-sr.c | 19 +
arch/arm64/kvm/mmio.c | 17 +-
arch/arm64/kvm/mmu.c | 7 +
arch/arm64/kvm/pmu-emul.c | 9 +
arch/arm64/kvm/psci.c | 12 +-
arch/arm64/kvm/reset.c | 10 +
arch/arm64/kvm/tmi.c | 148 +++++
arch/arm64/kvm/vgic/vgic-v3.c | 16 +-
arch/arm64/kvm/vgic/vgic.c | 52 +-
arch/arm64/mm/mmu.c | 11 +
arch/arm64/mm/pageattr.c | 9 +-
drivers/irqchip/irq-gic-v3-its.c | 228 ++++++-
drivers/perf/arm_pmu.c | 17 +
include/kvm/arm_arch_timer.h | 4 +
include/linux/kvm_host.h | 21 +
include/linux/perf/arm_pmu.h | 3 +
include/linux/swiotlb.h | 13 +
include/uapi/linux/kvm.h | 29 +
kernel/dma/direct.c | 39 ++
kernel/dma/swiotlb.c | 86 ++-
virt/kvm/kvm_main.c | 7 +-
36 files changed, 2646 insertions(+), 41 deletions(-)
create mode 100644 arch/arm64/include/asm/cvm_guest.h
create mode 100644 arch/arm64/include/asm/kvm_tmi.h
create mode 100644 arch/arm64/include/asm/kvm_tmm.h
create mode 100644 arch/arm64/kvm/cvm.c
create mode 100644 arch/arm64/kvm/cvm_exit.c
create mode 100644 arch/arm64/kvm/cvm_guest.c
create mode 100644 arch/arm64/kvm/tmi.c
--
2.33.0

08 Apr '24
From: Daniil Dulov <d.dulov(a)aladdin.ru>
mainline inclusion
from mainline-v6.8-rc6
commit 6ea38e2aeb72349cad50e38899b0ba6fbcb2af3d
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I9E3E9
CVE: CVE-2024-26736
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit?i…
--------------------------------
The maximum length of the volume->vid value is 20 characters: a 64-bit
volume ID can take up to 20 decimal digits. So increase the idbuf[] size to
24 to avoid overflow.
Found by Linux Verification Center (linuxtesting.org) with SVACE.
[DH: Actually, it's 20 + NUL, so increase it to 24 and use snprintf()]
Fixes: d2ddc776a458 ("afs: Overhaul volume and server record caching and fileserver rotation")
Signed-off-by: Daniil Dulov <d.dulov(a)aladdin.ru>
Signed-off-by: David Howells <dhowells(a)redhat.com>
Link: https://lore.kernel.org/r/20240211150442.3416-1-d.dulov@aladdin.ru/ # v1
Link: https://lore.kernel.org/r/20240212083347.10742-1-d.dulov@aladdin.ru/ # v2
Link: https://lore.kernel.org/r/20240219143906.138346-3-dhowells@redhat.com
Signed-off-by: Christian Brauner <brauner(a)kernel.org>
Signed-off-by: Zhihao Cheng <chengzhihao1(a)huawei.com>
---
fs/afs/volume.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index f84194b791d3..fb19c69284ab 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -302,7 +302,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
{
struct afs_server_list *new, *old, *discard;
struct afs_vldb_entry *vldb;
- char idbuf[16];
+ char idbuf[24];
int ret, idsz;
_enter("");
@@ -310,7 +310,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
/* We look up an ID by passing it as a decimal string in the
* operation's name parameter.
*/
- idsz = sprintf(idbuf, "%llu", volume->vid);
+ idsz = snprintf(idbuf, sizeof(idbuf), "%llu", volume->vid);
vldb = afs_vl_lookup_vldb(volume->cell, key, idbuf, idsz);
if (IS_ERR(vldb)) {
--
2.31.1

[openeuler:OLK-6.6 3985/7311] drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:12668:21: sparse: sparse: symbol 'hclge_ops' was not declared. Should it be static?
by kernel test robot 08 Apr '24
tree: https://gitee.com/openeuler/kernel.git OLK-6.6
head: 05607873db411ec3c614313b43cec60138c26a99
commit: 5a74bf5a90b365913d0d91ec50463354d3295210 [3985/7311] net: hns3: add support customized exception handling interfaces
config: loongarch-randconfig-r113-20240408 (https://download.01.org/0day-ci/archive/20240408/202404082139.0v0lXo9X-lkp@…)
compiler: loongarch64-linux-gcc (GCC) 13.2.0
reproduce: (https://download.01.org/0day-ci/archive/20240408/202404082139.0v0lXo9X-lkp@…)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp(a)intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202404082139.0v0lXo9X-lkp@intel.com/
sparse warnings: (new ones prefixed by >>)
>> drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:12668:21: sparse: sparse: symbol 'hclge_ops' was not declared. Should it be static?
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c: note: in included file (through include/linux/mmzone.h, include/linux/gfp.h, include/linux/slab.h, ...):
include/linux/page-flags.h:245:46: sparse: sparse: self-comparison always evaluates to false
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:5264:31: sparse: sparse: context imbalance in 'hclge_sync_fd_user_def_cfg' - unexpected unlock
vim +/hclge_ops +12668 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
12667
12668 struct hnae3_ae_ops hclge_ops = {
12669 .init_ae_dev = hclge_init_ae_dev,
12670 .uninit_ae_dev = hclge_uninit_ae_dev,
12671 .reset_prepare = hclge_reset_prepare_general,
12672 .reset_done = hclge_reset_done,
12673 .init_client_instance = hclge_init_client_instance,
12674 .uninit_client_instance = hclge_uninit_client_instance,
12675 .map_ring_to_vector = hclge_map_ring_to_vector,
12676 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12677 .get_vector = hclge_get_vector,
12678 .put_vector = hclge_put_vector,
12679 .set_promisc_mode = hclge_set_promisc_mode,
12680 .request_update_promisc_mode = hclge_request_update_promisc_mode,
12681 .set_loopback = hclge_set_loopback,
12682 .start = hclge_ae_start,
12683 .stop = hclge_ae_stop,
12684 .client_start = hclge_client_start,
12685 .client_stop = hclge_client_stop,
12686 .get_status = hclge_get_status,
12687 .get_ksettings_an_result = hclge_get_ksettings_an_result,
12688 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12689 .get_media_type = hclge_get_media_type,
12690 .check_port_speed = hclge_check_port_speed,
12691 .get_fec_stats = hclge_get_fec_stats,
12692 .get_fec = hclge_get_fec,
12693 .set_fec = hclge_set_fec,
12694 .get_rss_key_size = hclge_comm_get_rss_key_size,
12695 .get_rss = hclge_get_rss,
12696 .set_rss = hclge_set_rss,
12697 .set_rss_tuple = hclge_set_rss_tuple,
12698 .get_rss_tuple = hclge_get_rss_tuple,
12699 .get_tc_size = hclge_get_tc_size,
12700 .get_mac_addr = hclge_get_mac_addr,
12701 .set_mac_addr = hclge_set_mac_addr,
12702 .do_ioctl = hclge_do_ioctl,
12703 .add_uc_addr = hclge_add_uc_addr,
12704 .rm_uc_addr = hclge_rm_uc_addr,
12705 .add_mc_addr = hclge_add_mc_addr,
12706 .rm_mc_addr = hclge_rm_mc_addr,
12707 .set_autoneg = hclge_set_autoneg,
12708 .get_autoneg = hclge_get_autoneg,
12709 .restart_autoneg = hclge_restart_autoneg,
12710 .halt_autoneg = hclge_halt_autoneg,
12711 .get_pauseparam = hclge_get_pauseparam,
12712 .set_pauseparam = hclge_set_pauseparam,
12713 .set_mtu = hclge_set_mtu,
12714 .reset_queue = hclge_reset_tqp,
12715 .get_stats = hclge_get_stats,
12716 .get_mac_stats = hclge_get_mac_stat,
12717 .update_stats = hclge_update_stats,
12718 .get_strings = hclge_get_strings,
12719 .get_sset_count = hclge_get_sset_count,
12720 .get_fw_version = hclge_get_fw_version,
12721 .get_mdix_mode = hclge_get_mdix_mode,
12722 .enable_vlan_filter = hclge_enable_vlan_filter,
12723 .set_vlan_filter = hclge_set_vlan_filter,
12724 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12725 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12726 .reset_event = hclge_reset_event,
12727 .get_reset_level = hclge_get_reset_level,
12728 .set_default_reset_request = hclge_set_def_reset_request,
12729 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12730 .set_channels = hclge_set_channels,
12731 .get_channels = hclge_get_channels,
12732 .get_regs_len = hclge_get_regs_len,
12733 .get_regs = hclge_get_regs,
12734 .set_led_id = hclge_set_led_id,
12735 .get_link_mode = hclge_get_link_mode,
12736 .add_fd_entry = hclge_add_fd_entry,
12737 .del_fd_entry = hclge_del_fd_entry,
12738 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12739 .get_fd_rule_info = hclge_get_fd_rule_info,
12740 .get_fd_all_rules = hclge_get_all_rules,
12741 .enable_fd = hclge_enable_fd,
12742 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
12743 .dbg_read_cmd = hclge_dbg_read_cmd,
12744 .handle_hw_ras_error = hclge_handle_hw_ras_error,
12745 .get_hw_reset_stat = hclge_get_hw_reset_stat,
12746 .ae_dev_resetting = hclge_ae_dev_resetting,
12747 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12748 .set_gro_en = hclge_gro_en,
12749 .get_global_queue_id = hclge_covert_handle_qid_global,
12750 .set_timer_task = hclge_set_timer_task,
12751 .mac_connect_phy = hclge_mac_connect_phy,
12752 .mac_disconnect_phy = hclge_mac_disconnect_phy,
12753 .get_vf_config = hclge_get_vf_config,
12754 .set_vf_link_state = hclge_set_vf_link_state,
12755 .set_vf_spoofchk = hclge_set_vf_spoofchk,
12756 .set_vf_trust = hclge_set_vf_trust,
12757 .set_vf_rate = hclge_set_vf_rate,
12758 .set_vf_mac = hclge_set_vf_mac,
12759 .get_module_eeprom = hclge_get_module_eeprom,
12760 .get_cmdq_stat = hclge_get_cmdq_stat,
12761 .add_cls_flower = hclge_add_cls_flower,
12762 .del_cls_flower = hclge_del_cls_flower,
12763 .cls_flower_active = hclge_is_cls_flower_active,
12764 .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
12765 .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
12766 .set_tx_hwts_info = hclge_ptp_set_tx_info,
12767 .get_rx_hwts = hclge_ptp_get_rx_hwts,
12768 .get_ts_info = hclge_ptp_get_ts_info,
12769 .get_link_diagnosis_info = hclge_get_link_diagnosis_info,
12770 .clean_vf_config = hclge_clean_vport_config,
12771 .get_dscp_prio = hclge_get_dscp_prio,
12772 .get_wol = hclge_get_wol,
12773 .set_wol = hclge_set_wol,
12774 .priv_ops = hclge_ext_ops_handle,
12775 };
12776
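If hclge_ops is meant to be referenced from other files (its non-static
definition and the new .priv_ops hook suggest so), the usual way to silence
this sparse warning is a declaration in a shared header; if the symbol is in
fact file-local, marking it static is the fix. A hedged sketch of the header
route (placing it in hclge_main.h is an assumption):

	/* in hclge_main.h, so sparse sees a prior declaration for the symbol */
	extern struct hnae3_ae_ops hclge_ops;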
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
From: Jingxian He <hejingxian(a)huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CC0X
CVE: N/A
------------
Add host support for Confidential VMs (CVMs):
1. Add a new kvm_type for CVMs (a userspace usage sketch follows this list).
2. Initialize CVM-related data when userspace creates a VM of the CVM type.
3. Add support for the CVM hypervisor, named TMM, which runs in S-EL2.
4. Have KVM call the TMI interface to create the CVM stage-2 page table and
   run the CVM.
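From the VMM side, a usage sketch of item 1 (KVM_VM_TYPE_CVM is a
hypothetical name here; the concrete value comes from the new kvm_type):

	/* create a confidential VM by passing the CVM machine type to KVM;
	 * needs <fcntl.h>, <sys/ioctl.h>, <linux/kvm.h>
	 */
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_TYPE_CVM);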
Signed-off-by: Jingxian He <hejingxian(a)huawei.com>
---
arch/arm64/configs/defconfig | 1 +
arch/arm64/configs/openeuler_defconfig | 1 +
arch/arm64/include/asm/kvm_emulate.h | 14 +
arch/arm64/include/asm/kvm_host.h | 12 +
arch/arm64/include/asm/kvm_tmi.h | 373 +++++++++++
arch/arm64/include/asm/kvm_tmm.h | 72 +++
arch/arm64/kvm/Kconfig | 8 +
arch/arm64/kvm/Makefile | 4 +
arch/arm64/kvm/arch_timer.c | 104 +++-
arch/arm64/kvm/arm.c | 131 +++-
arch/arm64/kvm/cvm.c | 824 +++++++++++++++++++++++++
arch/arm64/kvm/cvm_exit.c | 229 +++++++
arch/arm64/kvm/guest.c | 8 +
arch/arm64/kvm/hyp/vgic-v3-sr.c | 19 +
arch/arm64/kvm/mmio.c | 17 +-
arch/arm64/kvm/mmu.c | 7 +
arch/arm64/kvm/psci.c | 12 +-
arch/arm64/kvm/reset.c | 10 +
arch/arm64/kvm/tmi.c | 148 +++++
arch/arm64/kvm/vgic/vgic-v3.c | 16 +-
arch/arm64/kvm/vgic/vgic.c | 52 +-
include/kvm/arm_arch_timer.h | 4 +
include/linux/kvm_host.h | 21 +
include/uapi/linux/kvm.h | 29 +
virt/kvm/kvm_main.c | 7 +-
25 files changed, 2105 insertions(+), 18 deletions(-)
create mode 100644 arch/arm64/include/asm/kvm_tmi.h
create mode 100644 arch/arm64/include/asm/kvm_tmm.h
create mode 100644 arch/arm64/kvm/cvm.c
create mode 100644 arch/arm64/kvm/cvm_exit.c
create mode 100644 arch/arm64/kvm/tmi.c
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index d025bafcc..ace2bf4ad 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -110,6 +110,7 @@ CONFIG_ACPI_APEI_MEMORY_FAILURE=y
CONFIG_ACPI_APEI_EINJ=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=y
+CONFIG_CVM_HOST=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 5ad5e4378..e298ca7e5 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -762,6 +762,7 @@ CONFIG_ACPI_PPTT=y
CONFIG_IRQ_BYPASS_MANAGER=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=y
+CONFIG_CVM_HOST=y
CONFIG_HAVE_KVM_IRQCHIP=y
CONFIG_HAVE_KVM_IRQFD=y
CONFIG_HAVE_KVM_IRQ_ROUTING=y
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index fb3e3f613..ab1aebd1f 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -556,4 +556,18 @@ static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
return test_bit(feature, vcpu->arch.features);
}
+#ifdef CONFIG_CVM_HOST
+static inline bool kvm_is_cvm(struct kvm *kvm)
+{
+ if (static_branch_unlikely(&kvm_cvm_is_available)) {
+ return kvm->arch.is_cvm;
+ }
+ return false;
+}
+
+static inline enum cvm_state kvm_cvm_state(struct kvm *kvm)
+{
+ return READ_ONCE(kvm->arch.cvm.state);
+}
+#endif
#endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 8bb67dfb9..01b8f9331 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -26,6 +26,9 @@
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/thread_info.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmm.h>
+#endif
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -127,6 +130,11 @@ struct kvm_arch {
cpumask_t *dvm_cpumask; /* Union of all vcpu's cpus_ptr */
u64 lsudvmbm_el2;
#endif
+
+#ifdef CONFIG_CVM_HOST
+ struct cvm cvm;
+ bool is_cvm;
+#endif
};
struct kvm_vcpu_fault_info {
@@ -405,6 +413,10 @@ struct kvm_vcpu_arch {
cpumask_t *cpus_ptr;
cpumask_t *pre_cpus_ptr;
#endif
+
+#ifdef CONFIG_CVM_HOST
+ struct cvm_tec tec;
+#endif
};
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
diff --git a/arch/arm64/include/asm/kvm_tmi.h b/arch/arm64/include/asm/kvm_tmi.h
new file mode 100644
index 000000000..554b3e439
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_tmi.h
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+#ifndef __TMM_TMI_H
+#define __TMM_TMI_H
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_pgtable.h>
+#include <linux/virtio_ring.h>
+
+#define GRANULE_SIZE 4096
+
+#define NO_NUMA -1
+
+#define TMM_TTT_LEVEL_3 3
+
+#ifdef CONFIG_CVM_HOST_FVP_PLAT
+#define CVM_MEM_BASE ULL(0x8800000000) /* choose FVP platform to run cVM */
+#define VQ_NUM 3
+#else
+#define CVM_MEM_BASE ULL(0x800000000) /* choose qemu platform to run cVM */
+#define VQ_NUM 3
+#endif
+
+#define MEM_SEG_NUMS 2
+
+/* defined in QEMU hw/arm/virt.c */
+#define VIRT_PCIE_MMIO 0x10000000 /* 256MB */
+#define VIRT_PCIE_MMIO_SIZE 0x1000000 /* 16MB */
+#define VIRT_HIGH_PCIE_ECAM 0x8000000000 /* 512GB */
+#define VIRT_HIGH_PCIE_ECAM_SIZE 0x12000000 /* 288MB */
+
+/* TMI error codes. */
+#define TMI_SUCCESS 0
+#define TMI_ERROR_INPUT 1
+#define TMI_ERROR_MEMORY 2
+#define TMI_ERROR_ALIAS 3
+#define TMI_ERROR_IN_USE 4
+#define TMI_ERROR_CVM_STATE 5
+#define TMI_ERROR_OWNER 6
+#define TMI_ERROR_TEC 7
+#define TMI_ERROR_TTT_WALK 8
+#define TMI_ERROR_TTT_ENTRY 9
+#define TMI_ERROR_NOT_SUPPORTED 10
+#define TMI_ERROR_INTERNAL 11
+#define TMI_ERROR_CVM_POWEROFF 12
+
+#define TMI_RETURN_STATUS(ret) ((ret) & 0xFF)
+#define TMI_RETURN_INDEX(ret) (((ret) >> 8) & 0xFF)
+
+#define TMI_FEATURE_REGISTER_0_S2SZ GENMASK(7, 0)
+#define TMI_FEATURE_REGISTER_0_LPA2 BIT(8)
+#define TMI_FEATURE_REGISTER_0_SVE_EN BIT(9)
+#define TMI_FEATURE_REGISTER_0_SVE_VL GENMASK(13, 10)
+#define TMI_FEATURE_REGISTER_0_NUM_BPS GENMASK(17, 14)
+#define TMI_FEATURE_REGISTER_0_NUM_WPS GENMASK(21, 18)
+#define TMI_FEATURE_REGISTER_0_PMU_EN BIT(22)
+#define TMI_FEATURE_REGISTER_0_PMU_NUM_CTRS GENMASK(27, 23)
+#define TMI_FEATURE_REGISTER_0_HASH_SHA_256 BIT(28)
+#define TMI_FEATURE_REGISTER_0_HASH_SHA_512 BIT(29)
+
+#define TMI_CVM_PARAM_FLAG_LPA2 BIT(0)
+#define TMI_CVM_PARAM_FLAG_SVE BIT(1)
+#define TMI_CVM_PARAM_FLAG_PMU BIT(2)
+
+/*
+ * Many of these fields are smaller than u64 but all fields have u64
+ * alignment, so use u64 to ensure correct alignment.
+ */
+typedef struct tmi_cvm_params {
+ u64 flags;
+ u64 s2sz;
+ u64 sve_vl;
+ u64 num_bps;
+ u64 num_wps;
+ u64 pmu_num_cnts;
+ u64 measurement_algo;
+ u64 vmid;
+ u64 ns_vtcr;
+ u64 vttbr_el2;
+ u64 ttt_base;
+ s64 ttt_level_start;
+ u64 ttt_num_start;
+ u8 rpv[64]; /* Bits 512 */
+} tmi_cvm_params_t;
+
+#define TMI_NOT_RUNNABLE 0
+#define TMI_RUNNABLE 1
+
+/*
+ * The number of GPRs (starting from X0) that are
+ * configured by the host when a TEC is created.
+ */
+#define TEC_CREATE_NR_GPRS (8U)
+
+struct tmi_tec_params {
+ uint64_t gprs[TEC_CREATE_NR_GPRS];
+ uint64_t pc;
+ uint64_t flags;
+ uint64_t ram_size;
+};
+
+#define TEC_ENTRY_FLAG_EMUL_MMIO (1UL << 0U)
+#define TEC_ENTRY_FLAG_INJECT_SEA (1UL << 1U)
+#define TEC_ENTRY_FLAG_TRAP_WFI (1UL << 2U)
+#define TEC_ENTRY_FLAG_TRAP_WFE (1UL << 3U)
+
+#define TMI_EXIT_SYNC 0
+#define TMI_EXIT_IRQ 1
+#define TMI_EXIT_FIQ 2
+#define TMI_EXIT_PSCI 3
+#define TMI_EXIT_HOST_CALL 5
+#define TMI_EXIT_SERROR 6
+
+/*
+ * The number of GPRs (starting from X0) per voluntary exit context.
+ * Per SMCCC.
+ */
+#define TEC_EXIT_NR_GPRS (31U)
+
+/* Maximum number of Interrupt Controller List Registers. */
+#define TEC_GIC_NUM_LRS (16U)
+
+struct tmi_tec_entry {
+ uint64_t flags;
+ uint64_t gprs[TEC_EXIT_NR_GPRS];
+ uint64_t gicv3_lrs[TEC_GIC_NUM_LRS];
+ uint64_t gicv3_hcr;
+};
+
+struct tmi_tec_exit {
+ uint64_t exit_reason;
+ uint64_t esr;
+ uint64_t far;
+ uint64_t hpfar;
+ uint64_t gprs[TEC_EXIT_NR_GPRS];
+ uint64_t gicv3_hcr;
+ uint64_t gicv3_lrs[TEC_GIC_NUM_LRS];
+ uint64_t gicv3_misr;
+ uint64_t gicv3_vmcr;
+ uint64_t cntv_ctl;
+ uint64_t cntv_cval;
+ uint64_t cntp_ctl;
+ uint64_t cntp_cval;
+ uint64_t imm;
+};
+
+struct tmi_tec_run {
+ struct tmi_tec_entry tec_entry;
+ struct tmi_tec_exit tec_exit;
+};
+
+#define TMI_FNUM_MIN_VALUE U(0x150)
+#define TMI_FNUM_MAX_VALUE U(0x18F)
+
+/******************************************************************************
+ * Bit definitions inside the function id as per the SMC calling convention
+ ******************************************************************************/
+#define FUNCID_TYPE_SHIFT 31
+#define FUNCID_CC_SHIFT 30
+#define FUNCID_OEN_SHIFT 24
+#define FUNCID_NUM_SHIFT 0
+
+#define FUNCID_TYPE_MASK 0x1
+#define FUNCID_CC_MASK 0x1
+#define FUNCID_OEN_MASK 0x3f
+#define FUNCID_NUM_MASK 0xffff
+
+#define FUNCID_TYPE_WIDTH 1
+#define FUNCID_CC_WIDTH 1
+#define FUNCID_OEN_WIDTH 6
+#define FUNCID_NUM_WIDTH 16
+
+#define SMC_64 1
+#define SMC_32 0
+#define SMC_TYPE_FAST 1
+#define SMC_TYPE_STD 0
+
+/*****************************************************************************
+ * Owning entity number definitions inside the function id as per the SMC
+ * calling convention
+ *****************************************************************************/
+#define OEN_ARM_START 0
+#define OEN_ARM_END 0
+#define OEN_CPU_START 1
+#define OEN_CPU_END 1
+#define OEN_SIP_START 2
+#define OEN_SIP_END 2
+#define OEN_OEM_START 3
+#define OEN_OEM_END 3
+#define OEN_STD_START 4 /* Standard Calls */
+#define OEN_STD_END 4
+#define OEN_TAP_START 48 /* Trusted Applications */
+#define OEN_TAP_END 49
+#define OEN_TOS_START 50 /* Trusted OS */
+#define OEN_TOS_END 63
+#define OEN_LIMIT 64
+
+/* Get TMI fastcall std FID from function number */
+#define TMI_FID(smc_cc, func_num) \
+ ((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) | \
+ ((smc_cc) << FUNCID_CC_SHIFT) | \
+ (OEN_STD_START << FUNCID_OEN_SHIFT) | \
+ ((func_num) << FUNCID_NUM_SHIFT))
+
+#define U(_x) (_x##U)
+
+/*
+ * SMC_TMM_INIT_COMPLETE is the only function in the TMI that originates from
+ * the CVM world and is handled by the SPMD. The remaining functions are
+ * always invoked by the Normal world, forward by SPMD and handled by the
+ * TMM.
+ */
+#define TMI_FNUM_VERSION U(0x260)
+#define TMI_FNUM_MEM_ALLOC U(0x261)
+#define TMI_FNUM_MEM_FREE U(0x262)
+#define TMI_FNUM_DATA_CREATE U(0x263)
+#define TMI_FNUM_DATA_DESTROY U(0x265)
+#define TMI_FNUM_CVM_ACTIVATE U(0x267)
+#define TMI_FNUM_CVM_CREATE U(0x268)
+#define TMI_FNUM_CVM_DESTROY U(0x269)
+#define TMI_FNUM_TEC_CREATE U(0x27A)
+#define TMI_FNUM_TEC_DESTROY U(0x27B)
+#define TMI_FNUM_TEC_ENTER U(0x27C)
+#define TMI_FNUM_TTT_CREATE U(0x27D)
+#define TMI_FNUM_TTT_DESTROY U(0x27E)
+#define TMI_FNUM_TTT_MAP_UNPROTECTED U(0x27F)
+#define TMI_FNUM_TTT_MAP_PROTECTED U(0x280)
+#define TMI_FNUM_TTT_UNMAP_UNPROTECTED U(0x282)
+#define TMI_FNUM_TTT_UNMAP_PROTECTED U(0x283)
+#define TMI_FNUM_PSCI_COMPLETE U(0x284)
+#define TMI_FNUM_FEATURES U(0x285)
+#define TMI_FNUM_TTT_MAP_RANGE U(0x286)
+#define TMI_FNUM_TTT_UNMAP_RANGE U(0x287)
+
+/* TMI SMC64 PIDs handled by the SPMD */
+#define TMI_TMM_VESION TMI_FID(SMC_64, TMI_FNUM_VERSION)
+#define TMI_TMM_DATA_CREATE TMI_FID(SMC_64, TMI_FNUM_DATA_CREATE)
+#define TMI_TMM_DATA_DESTROY TMI_FID(SMC_64, TMI_FNUM_DATA_DESTROY)
+#define TMI_TMM_CVM_ACTIVATE TMI_FID(SMC_64, TMI_FNUM_CVM_ACTIVATE)
+#define TMI_TMM_CVM_CREATE TMI_FID(SMC_64, TMI_FNUM_CVM_CREATE)
+#define TMI_TMM_CVM_DESTROY TMI_FID(SMC_64, TMI_FNUM_CVM_DESTROY)
+#define TMI_TMM_TEC_CREATE TMI_FID(SMC_64, TMI_FNUM_TEC_CREATE)
+#define TMI_TMM_TEC_DESTROY TMI_FID(SMC_64, TMI_FNUM_TEC_DESTROY)
+#define TMI_TMM_TEC_ENTER TMI_FID(SMC_64, TMI_FNUM_TEC_ENTER)
+#define TMI_TMM_TTT_CREATE TMI_FID(SMC_64, TMI_FNUM_TTT_CREATE)
+#define TMI_TMM_TTT_DESTROY TMI_FID(SMC_64, TMI_FNUM_TTT_DESTROY)
+#define TMI_TMM_TTT_MAP_UNPROTECTED TMI_FID(SMC_64, TMI_FNUM_TTT_MAP_UNPROTECTED)
+#define TMI_TMM_TTT_MAP_PROTECTED TMI_FID(SMC_64, TMI_FNUM_TTT_MAP_PROTECTED)
+#define TMI_TMM_TTT_UNMAP_UNPROTECTED TMI_FID(SMC_64, TMI_FNUM_TTT_UNMAP_UNPROTECTED)
+#define TMI_TMM_TTT_UNMAP_PROTECTED TMI_FID(SMC_64, TMI_FNUM_TTT_UNMAP_PROTECTED)
+#define TMI_TMM_PSCI_COMPLETE TMI_FID(SMC_64, TMI_FNUM_PSCI_COMPLETE)
+#define TMI_TMM_FEATURES TMI_FID(SMC_64, TMI_FNUM_FEATURES)
+#define TMI_TMM_MEM_ALLOC TMI_FID(SMC_64, TMI_FNUM_MEM_ALLOC)
+#define TMI_TMM_MEM_FREE TMI_FID(SMC_64, TMI_FNUM_MEM_FREE)
+#define TMI_TMM_TTT_MAP_RANGE TMI_FID(SMC_64, TMI_FNUM_TTT_MAP_RANGE)
+#define TMI_TMM_TTT_UNMAP_RANGE TMI_FID(SMC_64, TMI_FNUM_TTT_UNMAP_RANGE)
+
+#define TMI_ABI_VERSION_GET_MAJOR(_version) ((_version) >> 16)
+#define TMI_ABI_VERSION_GET_MINOR(_version) ((_version) & 0xFFFF)
+
+#define TMI_ABI_VERSION_MAJOR U(0x0)
+
+/* KVM_CAP_ARM_TMM on VM fd */
+#define KVM_CAP_ARM_TMM_CONFIG_CVM_HOST 0
+#define KVM_CAP_ARM_TMM_CREATE_CVM 1
+#define KVM_CAP_ARM_TMM_INIT_IPA_CVM 2
+#define KVM_CAP_ARM_TMM_POPULATE_CVM 3
+#define KVM_CAP_ARM_TMM_ACTIVATE_CVM 4
+
+#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA256 0
+#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA512 1
+
+#define KVM_CAP_ARM_TMM_RPV_SIZE 64
+
+/* List of configuration items accepted for KVM_CAP_ARM_TMM_CONFIG_CVM_HOST */
+#define KVM_CAP_ARM_TMM_CFG_RPV 0
+#define KVM_CAP_ARM_TMM_CFG_HASH_ALGO 1
+#define KVM_CAP_ARM_TMM_CFG_SVE 2
+#define KVM_CAP_ARM_TMM_CFG_DBG 3
+#define KVM_CAP_ARM_TMM_CFG_PMU 4
+
+DECLARE_STATIC_KEY_FALSE(kvm_cvm_is_available);
+DECLARE_STATIC_KEY_FALSE(kvm_cvm_is_enable);
+
+struct kvm_cap_arm_tmm_config_item {
+ __u32 cfg;
+ union {
+ /* cfg == KVM_CAP_ARM_TMM_CFG_RPV */
+ struct {
+ __u8 rpv[KVM_CAP_ARM_TMM_RPV_SIZE];
+ };
+
+ /* cfg == KVM_CAP_ARM_TMM_CFG_HASH_ALGO */
+ struct {
+ __u32 hash_algo;
+ };
+
+ /* cfg == KVM_CAP_ARM_TMM_CFG_SVE */
+ struct {
+ __u32 sve_vq;
+ };
+
+ /* cfg == KVM_CAP_ARM_TMM_CFG_DBG */
+ struct {
+ __u32 num_brps;
+ __u32 num_wrps;
+ };
+
+ /* cfg == KVM_CAP_ARM_TMM_CFG_PMU */
+ struct {
+ __u32 num_pmu_cntrs;
+ };
+ /* Fix the size of the union */
+ __u8 reserved[256];
+ };
+};
+
+enum tmi_tmm_mem_type {
+ TMM_MEM_TYPE_RD,
+ TMM_MEM_TYPE_TEC,
+ TMM_MEM_TYPE_TTT,
+ TMM_MEM_TYPE_CVM_PA,
+};
+
+enum tmi_tmm_map_size {
+ TMM_MEM_MAP_SIZE_4K,
+ TMM_MEM_MAP_SIZE_2M,
+ TMM_MEM_MAP_SIZE_1G,
+ TMM_MEM_MAP_SIZE_MAX,
+};
+
+static inline bool tmm_is_addr_ttt_level_aligned(uint64_t addr, int level)
+{
+ uint64_t mask = (1UL << (12 + 9 * (3 - level))) - 1;
+ return (addr & mask) == 0;
+}
+
+u64 phys_to_cvm_phys(u64 phys);
+
+u64 tmi_version(void);
+u64 tmi_data_create(u64 data, u64 rd, u64 map_addr, u64 src, u64 level);
+u64 tmi_data_destroy(u64 rd, u64 map_addr, u64 level);
+u64 tmi_cvm_activate(u64 rd);
+u64 tmi_cvm_create(u64 rd, u64 params_ptr);
+u64 tmi_cvm_destroy(u64 rd);
+u64 tmi_tec_create(u64 tec, u64 rd, u64 mpidr, u64 params_ptr);
+u64 tmi_tec_destroy(u64 tec);
+u64 tmi_tec_enter(u64 tec, u64 run_ptr);
+u64 tmi_ttt_create(u64 ttt, u64 rd, u64 map_addr, u64 level);
+u64 tmi_ttt_destroy(u64 ttt, u64 rd, u64 map_addr, u64 level);
+u64 tmi_ttt_map_unprotected(u64 rd, u64 map_addr, u64 level, u64 ttte);
+u64 tmi_ttt_unmap_unprotected(u64 rd, u64 map_addr, u64 level, u64 ns);
+u64 tmi_ttt_unmap_protected(u64 rd, u64 map_addr, u64 level);
+u64 tmi_psci_complete(u64 calling_tec, u64 target_tec);
+u64 tmi_features(u64 index);
+u64 tmi_ttt_map_range(u64 rd, u64 map_addr, u64 size, u64 cur_node, u64 target_node);
+u64 tmi_ttt_unmap_range(u64 rd, u64 map_addr, u64 size, u64 node_id);
+
+u64 tmi_mem_alloc(u64 rd, u64 numa_id, enum tmi_tmm_mem_type tmm_mem_type,
+ enum tmi_tmm_map_size tmm_map_size);
+u64 tmi_mem_free(u64 pa, u64 numa_id, enum tmi_tmm_mem_type tmm_mem_type,
+ enum tmi_tmm_map_size tmm_map_size);
+
+void kvm_cvm_vcpu_put(struct kvm_vcpu *vcpu);
+int kvm_load_user_data(struct kvm *kvm, unsigned long arg);
+unsigned long cvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu,
+ unsigned long target_affinity, unsigned long lowest_affinity_level);
+int kvm_cvm_vcpu_set_events(struct kvm_vcpu *vcpu,
+ bool serror_pending, bool ext_dabt_pending);
+
+#endif
diff --git a/arch/arm64/include/asm/kvm_tmm.h b/arch/arm64/include/asm/kvm_tmm.h
new file mode 100644
index 000000000..41383494f
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_tmm.h
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+#ifndef __ASM_KVM_TMM_H
+#define __ASM_KVM_TMM_H
+
+#include <uapi/linux/kvm.h>
+
+enum cvm_state {
+ CVM_STATE_NONE,
+ CVM_STATE_NEW,
+ CVM_STATE_ACTIVE,
+ CVM_STATE_DYING
+};
+
+struct cvm {
+ enum cvm_state state;
+ u32 cvm_vmid;
+ u64 rd;
+ u64 loader_start;
+ u64 initrd_start;
+ u64 initrd_size;
+ u64 ram_size;
+ struct kvm_numa_info numa_info;
+ struct tmi_cvm_params *params;
+};
+
+/*
+ * struct cvm_tec - Additional per VCPU data for a CVM
+ */
+struct cvm_tec {
+ u64 tec;
+ bool tec_created;
+ void *tec_run;
+};
+
+int kvm_init_tmm(void);
+int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap);
+int kvm_init_cvm_vm(struct kvm *kvm);
+void kvm_destroy_cvm(struct kvm *kvm);
+int kvm_create_tec(struct kvm_vcpu *vcpu);
+void kvm_destroy_tec(struct kvm_vcpu *vcpu);
+int kvm_tec_enter(struct kvm_vcpu *vcpu);
+int handle_cvm_exit(struct kvm_vcpu *vcpu, int rec_run_status);
+int kvm_arm_create_cvm(struct kvm *kvm);
+void kvm_free_rd(struct kvm *kvm);
+int cvm_create_rd(struct kvm *kvm);
+int kvm_arm_cvm_first_run(struct kvm_vcpu *vcpu);
+int cvm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target);
+int kvm_arch_tec_init(struct kvm_vcpu *vcpu);
+
+void kvm_cvm_unmap_destroy_range(struct kvm *kvm);
+
+#define CVM_TTT_BLOCK_LEVEL 2
+#define CVM_TTT_MAX_LEVEL 3
+
+#define CVM_PAGE_SHIFT 12
+#define CVM_PAGE_SIZE BIT(CVM_PAGE_SHIFT)
+#define CVM_TTT_LEVEL_SHIFT(l) \
+ ((CVM_PAGE_SHIFT - 3) * (4 - (l)) + 3)
+#define CVM_L2_BLOCK_SIZE BIT(CVM_TTT_LEVEL_SHIFT(2))
+
+static inline unsigned long cvm_ttt_level_mapsize(int level)
+{
+ if (WARN_ON(level > CVM_TTT_BLOCK_LEVEL))
+ return CVM_PAGE_SIZE;
+
+ return (1UL << CVM_TTT_LEVEL_SHIFT(level));
+}
+
+#endif
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index d984a6041..7c24a4d33 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -46,6 +46,14 @@ menuconfig KVM
If unsure, say N.
+config CVM_HOST
+	bool "CVM host support"
+	depends on KVM && ARM64
+	help
+	  Support for running Confidential VMs (cVMs) based on S-EL2.
+
+ If unsure, say N.
+
if KVM
source "virt/kvm/Kconfig"
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 395d65165..3b92eaa4f 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -24,5 +24,9 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
vgic/vgic-its.o vgic/shadow_dev.o vgic/vgic-debug.o
+kvm-$(CONFIG_CVM_HOST) += tmi.o
+kvm-$(CONFIG_CVM_HOST) += cvm.o
+kvm-$(CONFIG_CVM_HOST) += cvm_exit.o
+
kvm-$(CONFIG_KVM_ARM_PMU) += pmu-emul.o
obj-$(CONFIG_KVM_HISI_VIRT) += hisilicon/
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index f9d6a5cd4..a2443d8da 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -16,6 +16,10 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
+
#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
@@ -138,10 +142,79 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
}
}
+#ifdef CONFIG_CVM_HOST
+static bool cvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
+{
+ return timer_ctx &&
+ ((timer_get_ctl(timer_ctx) &
+ (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
+}
+
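+/*
+ * Recompute the virtual/physical timer interrupt lines for a cVM vcpu.
+ * While the timers are not loaded the level is derived from cval vs. now;
+ * once loaded, the ISTATUS bit reported by the TMM is used instead.
+ */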
+void kvm_cvm_timers_update(struct kvm_vcpu *vcpu)
+{
+ int i;
+ u64 cval, now;
+ bool status, level;
+ struct arch_timer_context *timer;
+ struct arch_timer_cpu *arch_timer = &vcpu->arch.timer_cpu;
+
+ for (i = 0; i < NR_KVM_TIMERS; i++) {
+ timer = &arch_timer->timers[i];
+
+ if (!timer->loaded) {
+ if (!cvm_timer_irq_can_fire(timer))
+ continue;
+ cval = timer_get_cval(timer);
+ now = kvm_phys_timer_read() - timer_get_offset(timer);
+ level = (cval <= now);
+ kvm_timer_update_irq(vcpu, level, timer);
+ } else {
+ status = timer_get_ctl(timer) & ARCH_TIMER_CTRL_IT_STAT;
+ level = cvm_timer_irq_can_fire(timer) && status;
+ if (level != timer->irq.level)
+ kvm_timer_update_irq(vcpu, level, timer);
+ }
+ }
+}
+
+static void set_cvm_timers_loaded(struct kvm_vcpu *vcpu, bool loaded)
+{
+ int i;
+ struct arch_timer_cpu *arch_timer = &vcpu->arch.timer_cpu;
+
+ for (i = 0; i < NR_KVM_TIMERS; i++) {
+ struct arch_timer_context *timer = &arch_timer->timers[i];
+ timer->loaded = loaded;
+ }
+}
+
+static void kvm_timer_blocking(struct kvm_vcpu *vcpu);
+static void kvm_timer_unblocking(struct kvm_vcpu *vcpu);
+
+static inline void cvm_vcpu_load_timer_callback(struct kvm_vcpu *vcpu)
+{
+ kvm_cvm_timers_update(vcpu);
+ kvm_timer_unblocking(vcpu);
+ set_cvm_timers_loaded(vcpu, true);
+}
+
+static inline void cvm_vcpu_put_timer_callback(struct kvm_vcpu *vcpu)
+{
+ set_cvm_timers_loaded(vcpu, false);
+ if (rcuwait_active(kvm_arch_vcpu_get_wait(vcpu)))
+ kvm_timer_blocking(vcpu);
+}
+#endif
+
static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
{
struct kvm_vcpu *vcpu = ctxt->vcpu;
+#ifdef CONFIG_CVM_HOST
+ if (kvm_is_cvm(vcpu->kvm))
+ return;
+#endif
+
switch(arch_timer_ctx_index(ctxt)) {
case TIMER_VTIMER:
__vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
@@ -667,6 +740,13 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct timer_map map;
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ cvm_vcpu_load_timer_callback(vcpu);
+ return;
+ }
+#endif
+
if (unlikely(!timer->enabled))
return;
@@ -752,6 +832,13 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
struct timer_map map;
struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ cvm_vcpu_put_timer_callback(vcpu);
+ return;
+ }
+#endif
+
if (unlikely(!timer->enabled))
return;
@@ -898,7 +985,12 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
ptimer->vcpu = vcpu;
/* Synchronize cntvoff across all vtimers of a VM. */
- update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
+#ifdef CONFIG_CVM_HOST
+ if (kvm_is_cvm(vcpu->kvm))
+ update_vtimer_cntvoff(vcpu, 0);
+ else
+#endif
+ update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
timer_set_offset(ptimer, 0);
hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
@@ -1356,6 +1448,16 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
return -EINVAL;
}
+#ifdef CONFIG_CVM_HOST
+ /*
+ * We don't use mapped IRQs for CVM because the TMI doesn't allow
+ * us setting the LR.HW bit in the VGIC.
+ */
+ if (vcpu_is_tec(vcpu)) {
+ return 0;
+ }
+#endif
+
get_timer_map(vcpu, &map);
if (vtimer_is_irqbypass())
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 718f6060b..32974a10e 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -38,6 +38,9 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/sections.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
#include <kvm/arm_hypercalls.h>
#include <kvm/arm_pmu.h>
@@ -108,6 +111,12 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
r = 0;
kvm->arch.return_nisv_io_abort_to_user = true;
break;
+#ifdef CONFIG_CVM_HOST
+ case KVM_CAP_ARM_TMM:
+ if (static_branch_unlikely(&kvm_cvm_is_available))
+ r = kvm_cvm_enable_cap(kvm, cap);
+ break;
+#endif
default:
r = -EINVAL;
break;
@@ -149,13 +158,29 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
return ret;
#endif
+#ifdef CONFIG_CVM_HOST
+ if (kvm_arm_cvm_type(type)) {
+ ret = cvm_create_rd(kvm);
+ if (ret)
+ return ret;
+ }
+#endif
+
ret = kvm_arm_setup_stage2(kvm, type);
if (ret)
+#ifdef CONFIG_CVM_HOST
+ goto out_free_rd;
+#else
return ret;
+#endif
ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu);
if (ret)
+#ifdef CONFIG_CVM_HOST
+ goto out_free_rd;
+#else
return ret;
+#endif
ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
if (ret)
@@ -167,10 +192,21 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();
set_default_csv2(kvm);
+#ifdef CONFIG_CVM_HOST
+ if (kvm_arm_cvm_type(type)) {
+ ret = kvm_init_cvm_vm(kvm);
+ if (ret)
+ goto out_free_stage2_pgd;
+ }
+#endif
return ret;
out_free_stage2_pgd:
kvm_free_stage2_pgd(&kvm->arch.mmu);
+#ifdef CONFIG_CVM_HOST
+out_free_rd:
+ kvm_free_rd(kvm);
+#endif
return ret;
}
@@ -203,6 +239,10 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
}
}
atomic_set(&kvm->online_vcpus, 0);
+#ifdef CONFIG_CVM_HOST
+ if (kvm_is_cvm(kvm))
+ kvm_destroy_cvm(kvm);
+#endif
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -258,11 +298,21 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = 1;
break;
case KVM_CAP_STEAL_TIME:
+#ifdef CONFIG_CVM_HOST
+ if (kvm && kvm_is_cvm(kvm))
+ r = 0;
+ else
+#endif
r = kvm_arm_pvtime_supported();
break;
case KVM_CAP_ARM_VIRT_MSI_BYPASS:
r = sdev_enable;
break;
+#ifdef CONFIG_CVM_HOST
+ case KVM_CAP_ARM_TMM:
+ r = static_key_enabled(&kvm_cvm_is_available);
+ break;
+#endif
default:
r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
break;
@@ -358,6 +408,13 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
return err;
#endif
+#ifdef CONFIG_CVM_HOST
+ if (kvm_is_cvm(vcpu->kvm)) {
+ err = kvm_arch_tec_init(vcpu);
+ if (err)
+ return err;
+ }
+#endif
return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
}
@@ -444,8 +501,23 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu->cpu = cpu;
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ if (single_task_running())
+ vcpu_clear_wfx_traps(vcpu);
+ else
+ vcpu_set_wfx_traps(vcpu);
+ }
+#endif
kvm_vgic_load(vcpu);
kvm_timer_vcpu_load(vcpu);
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
+ kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
+ return;
+ }
+#endif
if (has_vhe())
kvm_vcpu_load_sysregs_vhe(vcpu);
kvm_arch_vcpu_load_fp(vcpu);
@@ -472,6 +544,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ kvm_cvm_vcpu_put(vcpu);
+ return;
+ }
+#endif
kvm_arch_vcpu_put_fp(vcpu);
if (has_vhe())
kvm_vcpu_put_sysregs_vhe(vcpu);
@@ -662,6 +740,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
* Tell the rest of the code that there are userspace irqchip
* VMs in the wild.
*/
+#ifdef CONFIG_CVM_HOST
+ if (!kvm_is_cvm(kvm))
+#endif
static_branch_inc(&userspace_irqchip_in_use);
}
@@ -830,7 +911,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
ret = kvm_vcpu_first_run_init(vcpu);
if (ret)
return ret;
-
+#ifdef CONFIG_CVM_HOST
+ if (kvm_is_cvm(vcpu->kvm)) {
+ ret = kvm_arm_cvm_first_run(vcpu);
+ if (ret)
+ return ret;
+ }
+#endif
if (run->exit_reason == KVM_EXIT_MMIO) {
ret = kvm_handle_mmio_return(vcpu);
if (ret)
@@ -905,8 +992,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
*/
trace_kvm_entry(vcpu->vcpu_id, *vcpu_pc(vcpu));
guest_enter_irqoff();
-
- ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ ret = kvm_tec_enter(vcpu);
+ else
+#endif
+ ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu->stat.exits++;
@@ -961,11 +1052,16 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
* guest time.
*/
guest_exit();
- trace_kvm_exit(vcpu->vcpu_id, ret, *vcpu_pc(vcpu));
-
- /* Exit types that need handling before we can be preempted */
- handle_exit_early(vcpu, ret);
+#ifdef CONFIG_CVM_HOST
+ if (!vcpu_is_tec(vcpu)) {
+#endif
+ trace_kvm_exit(vcpu->vcpu_id, ret, *vcpu_pc(vcpu));
+ /* Exit types that need handling before we can be preempted */
+ handle_exit_early(vcpu, ret);
+#ifdef CONFIG_CVM_HOST
+ }
+#endif
preempt_enable();
/*
@@ -986,8 +1082,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
vcpu->arch.target = -1;
ret = ARM_EXCEPTION_IL;
}
-
- ret = handle_exit(vcpu, ret);
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ ret = handle_cvm_exit(vcpu, ret);
+ else
+#endif
+ ret = handle_exit(vcpu, ret);
update_vcpu_stat_time(&vcpu->stat);
}
@@ -1419,6 +1519,11 @@ long kvm_arch_vm_ioctl(struct file *filp,
void __user *argp = (void __user *)arg;
switch (ioctl) {
+#ifdef CONFIG_CVM_HOST
+ case KVM_LOAD_USER_DATA: {
+ return kvm_load_user_data(kvm, arg);
+ }
+#endif
case KVM_CREATE_IRQCHIP: {
int ret;
if (!vgic_present)
@@ -1950,7 +2055,13 @@ int kvm_arch_init(void *opaque)
kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n");
return -ENODEV;
}
-
+#ifdef CONFIG_CVM_HOST
+ if (static_branch_unlikely(&kvm_cvm_is_enable) && in_hyp_mode) {
+ err = kvm_init_tmm();
+ if (err)
+ return err;
+ }
+#endif
if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
cpus_have_final_cap(ARM64_WORKAROUND_1508412))
kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
diff --git a/arch/arm64/kvm/cvm.c b/arch/arm64/kvm/cvm.c
new file mode 100644
index 000000000..11f82c07c
--- /dev/null
+++ b/arch/arm64/kvm/cvm.c
@@ -0,0 +1,824 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+#include <linux/kvm_host.h>
+#include <linux/kvm.h>
+#include <asm/kvm_tmi.h>
+#include <asm/kvm_pgtable.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_mmu.h>
+#include <asm/stage2_pgtable.h>
+#include <linux/arm-smccc.h>
+#include <kvm/arm_hypercalls.h>
+#include <kvm/arm_psci.h>
+
+/* Protects access to cvm_vmid_bitmap */
+static DEFINE_SPINLOCK(cvm_vmid_lock);
+static unsigned long *cvm_vmid_bitmap;
+DEFINE_STATIC_KEY_FALSE(kvm_cvm_is_available);
+DEFINE_STATIC_KEY_FALSE(kvm_cvm_is_enable);
+
+static int __init setup_cvm_host(char *str)
+{
+ int ret;
+ unsigned int val;
+
+ if (!str)
+ return 0;
+
+ ret = kstrtouint(str, 10, &val);
+ if (ret) {
+		pr_warn("Unable to parse cvm_host.\n");
+ } else {
+ if (val)
+ static_branch_enable(&kvm_cvm_is_enable);
+ }
+ return ret;
+}
+early_param("cvm_host", setup_cvm_host);
+
+u64 cvm_phys_to_phys(u64 phys)
+{
+ return phys;
+}
+
+u64 phys_to_cvm_phys(u64 phys)
+{
+ return phys;
+}
+
+static int cvm_vmid_init(void)
+{
+ unsigned int vmid_count = 1 << kvm_get_vmid_bits();
+
+ cvm_vmid_bitmap = bitmap_zalloc(vmid_count, GFP_KERNEL);
+ if (!cvm_vmid_bitmap) {
+ kvm_err("%s: Couldn't allocate cvm vmid bitmap\n", __func__);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static unsigned long tmm_feat_reg0;
+
+static bool tmm_supports(unsigned long feature)
+{
+ return !!u64_get_bits(tmm_feat_reg0, feature);
+}
+
+bool kvm_cvm_supports_sve(void)
+{
+ return tmm_supports(TMI_FEATURE_REGISTER_0_SVE_EN);
+}
+
+bool kvm_cvm_supports_pmu(void)
+{
+ return tmm_supports(TMI_FEATURE_REGISTER_0_PMU_EN);
+}
+
+u32 kvm_cvm_ipa_limit(void)
+{
+ return u64_get_bits(tmm_feat_reg0, TMI_FEATURE_REGISTER_0_S2SZ);
+}
+
+u32 kvm_cvm_get_num_brps(void)
+{
+ return u64_get_bits(tmm_feat_reg0, TMI_FEATURE_REGISTER_0_NUM_BPS);
+}
+
+u32 kvm_cvm_get_num_wrps(void)
+{
+ return u64_get_bits(tmm_feat_reg0, TMI_FEATURE_REGISTER_0_NUM_WPS);
+}
+
+static int cvm_vmid_reserve(void)
+{
+ int ret;
+ unsigned int vmid_count = 1 << kvm_get_vmid_bits();
+
+ spin_lock(&cvm_vmid_lock);
+ ret = bitmap_find_free_region(cvm_vmid_bitmap, vmid_count, 0);
+ spin_unlock(&cvm_vmid_lock);
+
+ return ret;
+}
+
+static void cvm_vmid_release(unsigned int vmid)
+{
+ spin_lock(&cvm_vmid_lock);
+ bitmap_release_region(cvm_vmid_bitmap, vmid, 0);
+ spin_unlock(&cvm_vmid_lock);
+}
+
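+/*
+ * Index of the concatenated stage-2 PGD page that maps @addr; used by
+ * kvm_pgd_pages() below to size the initial table handed to the TMM.
+ */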
+static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
+{
+ u64 shift = ARM64_HW_PGTABLE_LEVEL_SHIFT(pgt->start_level - 1);
+ u64 mask = BIT(pgt->ia_bits) - 1;
+
+ return (addr & mask) >> shift;
+}
+
+static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
+{
+ struct kvm_pgtable pgt = {
+ .ia_bits = ia_bits,
+ .start_level = start_level,
+ };
+ return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
+}
+
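+/*
+ * Finalize cVM creation: reserve a VMID, describe the stage-2 page table
+ * KVM already allocated, and hand both to the TMM via tmi_cvm_create().
+ * The params page is freed once the TMM has consumed it.
+ */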
+int kvm_arm_create_cvm(struct kvm *kvm)
+{
+ int ret;
+ struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
+ unsigned int pgd_sz;
+
+ if (!kvm_is_cvm(kvm) || kvm_cvm_state(kvm) != CVM_STATE_NONE) {
+ return 0;
+ }
+
+ ret = cvm_vmid_reserve();
+ if (ret < 0) {
+ return ret;
+ }
+ kvm->arch.cvm.cvm_vmid = ret;
+
+ pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level);
+
+ kvm->arch.cvm.params->ttt_base = phys_to_cvm_phys(kvm->arch.mmu.pgd_phys);
+ kvm->arch.cvm.params->measurement_algo = 0;
+ kvm->arch.cvm.params->ttt_level_start = kvm->arch.mmu.pgt->start_level;
+ kvm->arch.cvm.params->ttt_num_start = pgd_sz;
+ kvm->arch.cvm.params->s2sz = VTCR_EL2_IPA(kvm->arch.vtcr);
+ kvm->arch.cvm.params->vmid = kvm->arch.cvm.cvm_vmid;
+ kvm->arch.cvm.params->ns_vtcr = kvm->arch.vtcr;
+ kvm->arch.cvm.params->vttbr_el2 = kvm->arch.mmu.pgd_phys;
+ ret = tmi_cvm_create(kvm->arch.cvm.rd, __pa(kvm->arch.cvm.params));
+ if (!ret) {
+ kvm_info("KVM creates cVM: %d\n", kvm->arch.cvm.cvm_vmid);
+ }
+
+ WRITE_ONCE(kvm->arch.cvm.state, CVM_STATE_NEW);
+ kfree(kvm->arch.cvm.params);
+ kvm->arch.cvm.params = NULL;
+ return ret;
+}
+
+int cvm_create_rd(struct kvm *kvm)
+{
+ if (!static_key_enabled(&kvm_cvm_is_available))
+ return -EFAULT;
+
+	kvm->arch.cvm.rd = tmi_mem_alloc(kvm->arch.cvm.rd, NO_NUMA,
+					 TMM_MEM_TYPE_RD, TMM_MEM_MAP_SIZE_MAX);
+ if (!kvm->arch.cvm.rd) {
+ kvm_err("tmi_mem_alloc for cvm rd failed: %d\n", kvm->arch.cvm.cvm_vmid);
+ return -ENOMEM;
+ }
+ kvm->arch.is_cvm = true;
+ return 0;
+}
+
+void kvm_free_rd(struct kvm *kvm)
+{
+ int ret;
+
+ if (!kvm->arch.cvm.rd)
+ return;
+
+ ret = tmi_mem_free(kvm->arch.cvm.rd, NO_NUMA, TMM_MEM_TYPE_RD, TMM_MEM_MAP_SIZE_MAX);
+ if (ret)
+ kvm_err("tmi_mem_free for cvm rd failed: %d\n", kvm->arch.cvm.cvm_vmid);
+ else
+ kvm->arch.cvm.rd = 0;
+}
+
+void kvm_destroy_cvm(struct kvm *kvm)
+{
+ uint32_t cvm_vmid = kvm->arch.cvm.cvm_vmid;
+
+ if (kvm->arch.cvm.params) {
+ kfree(kvm->arch.cvm.params);
+ kvm->arch.cvm.params = NULL;
+ }
+
+ if (kvm_cvm_state(kvm) == CVM_STATE_NONE)
+ return;
+
+ cvm_vmid_release(cvm_vmid);
+
+ WRITE_ONCE(kvm->arch.cvm.state, CVM_STATE_DYING);
+
+ if (!tmi_cvm_destroy(kvm->arch.cvm.rd)) {
+ kvm_info("KVM has destroyed cVM: %d\n", kvm->arch.cvm.cvm_vmid);
+ }
+
+ kvm_free_rd(kvm);
+}
+
+static int kvm_get_host_numa_node_by_ipa(uint64_t ipa, struct kvm_vcpu *vcpu)
+{
+	int i;
+	struct kvm_numa_info *numa_info = &vcpu->kvm->arch.cvm.numa_info;
+
+	for (i = 0; i < numa_info->numa_cnt && i < MAX_NUMA_NODE; i++) {
+		struct kvm_numa_node *numa_node = &numa_info->numa_nodes[i];
+
+		if (ipa >= numa_node->ipa_start &&
+		    ipa < (numa_node->ipa_start + numa_node->ipa_size))
+			return numa_node->host_numa_node;
+	}
+ return NO_NUMA;
+}
+
+static int kvm_cvm_ttt_create(struct cvm *cvm,
+ unsigned long addr,
+ int level,
+ phys_addr_t phys)
+{
+ addr = ALIGN_DOWN(addr, cvm_ttt_level_mapsize(level - 1));
+ return tmi_ttt_create(phys, cvm->rd, addr, level);
+}
+
+int kvm_cvm_create_ttt_levels(struct kvm *kvm, struct cvm *cvm,
+ unsigned long ipa,
+ int level,
+ int max_level,
+ struct kvm_mmu_memory_cache *mc)
+{
+ if (WARN_ON(level == max_level))
+ return 0;
+
+ while (level++ < max_level) {
+ phys_addr_t ttt;
+ ttt = tmi_mem_alloc(cvm->rd, NO_NUMA, TMM_MEM_TYPE_TTT, TMM_MEM_MAP_SIZE_MAX);
+ if (ttt == 0)
+ return -ENOMEM;
+
+ if (kvm_cvm_ttt_create(cvm, ipa, level, ttt)) {
+ (void)tmi_mem_free(ttt, NO_NUMA, TMM_MEM_TYPE_TTT, TMM_MEM_MAP_SIZE_MAX);
+ return -ENXIO;
+ }
+ }
+
+ return 0;
+}
+
+static int kvm_cvm_create_protected_data_page(struct kvm *kvm, struct cvm *cvm,
+					      unsigned long ipa, int level,
+					      struct page *src_page,
+					      phys_addr_t dst_phys)
+{
+ phys_addr_t src_phys;
+ int ret;
+
+ src_phys = page_to_phys(src_page);
+ ret = tmi_data_create(dst_phys, cvm->rd, ipa, src_phys, level);
+ if (TMI_RETURN_STATUS(ret) == TMI_ERROR_TTT_WALK) {
+ /* Create missing RTTs and retry */
+ int level_fault = TMI_RETURN_INDEX(ret);
+ ret = kvm_cvm_create_ttt_levels(kvm, cvm, ipa, level_fault,
+ level, NULL);
+ if (ret)
+ goto err;
+ ret = tmi_data_create(dst_phys, cvm->rd, ipa, src_phys, level);
+ }
+ WARN_ON(ret);
+
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ return -ENXIO;
+}
+
+static u64 cvm_granule_size(u32 level)
+{
+ return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
+}
+
+int kvm_cvm_populate_par_region(struct kvm *kvm,
+ phys_addr_t ipa_base,
+ phys_addr_t ipa_end,
+ phys_addr_t dst_phys)
+{
+ struct cvm *cvm = &kvm->arch.cvm;
+ struct kvm_memory_slot *memslot;
+ gfn_t base_gfn, end_gfn;
+ int idx;
+ phys_addr_t ipa;
+ int ret = 0;
+ int level = TMM_TTT_LEVEL_3;
+ unsigned long map_size = cvm_granule_size(level);
+
+ base_gfn = gpa_to_gfn(ipa_base);
+ end_gfn = gpa_to_gfn(ipa_end);
+
+ idx = srcu_read_lock(&kvm->srcu);
+ memslot = gfn_to_memslot(kvm, base_gfn);
+ if (!memslot) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* We require the region to be contained within a single memslot */
+ if (memslot->base_gfn + memslot->npages < end_gfn) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mmap_read_lock(current->mm);
+
+ ipa = ipa_base;
+ while (ipa < ipa_end) {
+ struct page *page;
+ kvm_pfn_t pfn;
+
+ /*
+		 * FIXME: This causes over-mapping, but there's no good
+		 * solution here with the ABI as it stands.
+ */
+ ipa = ALIGN_DOWN(ipa, map_size);
+
+ pfn = gfn_to_pfn_memslot(memslot, gpa_to_gfn(ipa));
+
+ if (is_error_pfn(pfn)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ page = pfn_to_page(pfn);
+
+ ret = kvm_cvm_create_protected_data_page(kvm, cvm, ipa, level, page, dst_phys);
+ if (ret)
+ goto err_release_pfn;
+
+ ipa += map_size;
+ dst_phys += map_size;
+ kvm_release_pfn_dirty(pfn);
+err_release_pfn:
+ if (ret) {
+ kvm_release_pfn_clean(pfn);
+ break;
+ }
+ }
+
+ mmap_read_unlock(current->mm);
+out:
+ srcu_read_unlock(&kvm->srcu, idx);
+ return ret;
+}
+
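+/*
+ * Copy the loader/initrd region into protected memory (plus one extra 2MB
+ * granule of boot data) and map the remaining guest RAM to secure memory
+ * without copying. Called once, from the boot vcpu's first-run path.
+ */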
+static int kvm_sel2_map_protected_ipa(struct kvm_vcpu *vcpu)
+{
+ int ret = 0;
+ gpa_t gpa, gpa_data_end, gpa_end, data_size;
+ u64 map_size, dst_phys;
+ u64 l2_granule = cvm_granule_size(2); /* 2MB */
+ u64 numa_id;
+ int cur_numa_id;
+
+	/* 2MB-align all of the addresses computed below. */
+ gpa = vcpu->kvm->arch.cvm.loader_start;
+ gpa_end = vcpu->kvm->arch.cvm.loader_start + vcpu->kvm->arch.cvm.ram_size;
+ data_size = vcpu->kvm->arch.cvm.initrd_start - vcpu->kvm->arch.cvm.loader_start +
+ vcpu->kvm->arch.cvm.initrd_size;
+ data_size = round_up(data_size, l2_granule);
+ gpa_data_end = vcpu->kvm->arch.cvm.loader_start + data_size + l2_granule;
+ gpa = round_down(gpa, l2_granule);
+ gpa_end = round_up(gpa_end, l2_granule);
+ gpa_data_end = round_up(gpa_data_end, l2_granule);
+
+ /* get numa_id */
+ numa_id = kvm_get_host_numa_node_by_ipa(gpa, vcpu);
+ map_size = l2_granule;
+ do {
+		dst_phys = tmi_mem_alloc(vcpu->kvm->arch.cvm.rd, numa_id,
+					 TMM_MEM_TYPE_CVM_PA, map_size);
+ if (!dst_phys) {
+ ret = -ENOMEM;
+ kvm_err("[%s] call tmi_mem_alloc failed.\n", __func__);
+ goto out;
+ }
+
+		/*
+		 * Call tmi_data_create to copy the kernel data and to map
+		 * every GPA needed for system boot. Copying only data_size
+		 * is not enough to boot the kernel, so copy and map one
+		 * extra 2MB granule beyond it.
+		 */
+ ret = kvm_cvm_populate_par_region(vcpu->kvm, gpa, gpa + map_size, dst_phys);
+ if (ret) {
+ ret = -EFAULT;
+ kvm_err("kvm_cvm_populate_par_region fail:%d.\n", ret);
+ goto out;
+ }
+ gpa += map_size;
+ dst_phys += map_size;
+ } while (gpa < gpa_data_end);
+
+ cur_numa_id = numa_node_id();
+ if (cur_numa_id < 0) {
+ ret = -EFAULT;
+ kvm_err("get current numa node fail\n");
+ goto out;
+ }
+
+	/*
+	 * Map the remaining GPA range to secure memory without copying data
+	 * from the host. The mapped pages are freed when the cVM is destroyed.
+	 */
+ ret = tmi_ttt_map_range(vcpu->kvm->arch.cvm.rd, gpa_data_end,
+ gpa_end - gpa_data_end, cur_numa_id, numa_id);
+ if (ret)
+ kvm_err("tmi_ttt_map_range fail:%d.\n", ret);
+out:
+ return ret;
+}
+
+int kvm_create_tec(struct kvm_vcpu *vcpu)
+{
+ int ret;
+ int i;
+ struct tmi_tec_params *params_ptr;
+ struct user_pt_regs *vcpu_regs = vcpu_gp_regs(vcpu);
+ uint64_t mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+
+ params_ptr = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
+ if (!params_ptr) {
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < TEC_CREATE_NR_GPRS; ++i) {
+ params_ptr->gprs[i] = vcpu_regs->regs[i];
+ }
+
+ params_ptr->pc = vcpu_regs->pc;
+
+ if (vcpu->vcpu_id == 0) {
+ params_ptr->flags = TMI_RUNNABLE;
+ } else {
+ params_ptr->flags = TMI_NOT_RUNNABLE;
+ }
+ params_ptr->ram_size = vcpu->kvm->arch.cvm.ram_size;
+ ret = tmi_tec_create(vcpu->arch.tec.tec, vcpu->kvm->arch.cvm.rd, mpidr, __pa(params_ptr));
+
+ kfree(params_ptr);
+
+ return ret;
+}
+
+static int kvm_create_all_tecs(struct kvm *kvm)
+{
+ int ret = 0;
+ struct kvm_vcpu *vcpu;
+ unsigned long i;
+
+ if (READ_ONCE(kvm->arch.cvm.state) == CVM_STATE_ACTIVE) {
+ return -1;
+ }
+
+ mutex_lock(&kvm->lock);
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!vcpu->arch.tec.tec_created) {
+ ret = kvm_create_tec(vcpu);
+ if (ret) {
+ mutex_unlock(&kvm->lock);
+ return ret;
+ }
+ vcpu->arch.tec.tec_created = true;
+ }
+ }
+ mutex_unlock(&kvm->lock);
+ return ret;
+}
+
+static int config_cvm_sve(struct kvm *kvm, struct kvm_cap_arm_tmm_config_item *cfg)
+{
+ tmi_cvm_params_t *params = kvm->arch.cvm.params;
+
+ int max_sve_vq = u64_get_bits(tmm_feat_reg0,
+ TMI_FEATURE_REGISTER_0_SVE_VL);
+
+ if (!kvm_cvm_supports_sve())
+ return -EINVAL;
+
+ if (cfg->sve_vq > max_sve_vq)
+ return -EINVAL;
+
+ params->sve_vl = cfg->sve_vq;
+ params->flags |= TMI_CVM_PARAM_FLAG_SVE;
+
+ return 0;
+}
+
+static int config_cvm_pmu(struct kvm *kvm, struct kvm_cap_arm_tmm_config_item *cfg)
+{
+ tmi_cvm_params_t *params = kvm->arch.cvm.params;
+
+ int max_pmu_num_ctrs = u64_get_bits(tmm_feat_reg0,
+ TMI_FEATURE_REGISTER_0_PMU_NUM_CTRS);
+
+ if (!kvm_cvm_supports_pmu())
+ return -EINVAL;
+
+ if (cfg->num_pmu_cntrs > max_pmu_num_ctrs)
+ return -EINVAL;
+
+ params->pmu_num_cnts = cfg->num_pmu_cntrs;
+ params->flags |= TMI_CVM_PARAM_FLAG_PMU;
+
+ return 0;
+}
+
+static int kvm_tmm_config_cvm(struct kvm *kvm, struct kvm_enable_cap *cap)
+{
+ struct kvm_cap_arm_tmm_config_item cfg;
+ int r = 0;
+
+ if (kvm_cvm_state(kvm) != CVM_STATE_NONE)
+ return -EBUSY;
+
+ if (copy_from_user(&cfg, (void __user *)cap->args[1], sizeof(cfg))) {
+ return -EFAULT;
+ }
+
+ switch (cfg.cfg) {
+ case KVM_CAP_ARM_TMM_CFG_SVE:
+ r = config_cvm_sve(kvm, &cfg);
+ break;
+ case KVM_CAP_ARM_TMM_CFG_PMU:
+ r = config_cvm_pmu(kvm, &cfg);
+ break;
+ default:
+ r = -EINVAL;
+ }
+
+ return r;
+}
+
+int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
+{
+ int r = 0;
+
+ mutex_lock(&kvm->lock);
+ switch (cap->args[0]) {
+ case KVM_CAP_ARM_TMM_CONFIG_CVM_HOST:
+ r = kvm_tmm_config_cvm(kvm, cap);
+ break;
+ case KVM_CAP_ARM_TMM_CREATE_CVM:
+ r = kvm_arm_create_cvm(kvm);
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+ mutex_unlock(&kvm->lock);
+
+ return r;
+}
+
+void kvm_destroy_tec(struct kvm_vcpu *vcpu)
+{
+ int ret = 0;
+ if (!vcpu_is_tec(vcpu)) {
+ return;
+ }
+
+ if (tmi_tec_destroy(vcpu->arch.tec.tec) != 0) {
+ kvm_err("%s vcpu id : %d failed!\n", __func__, vcpu->vcpu_id);
+ }
+
+ ret = tmi_mem_free(vcpu->arch.tec.tec, NO_NUMA, TMM_MEM_TYPE_TEC, TMM_MEM_MAP_SIZE_MAX);
+ if (ret != 0) {
+ kvm_err("tmi_mem_free for cvm tec failed\n");
+ }
+ vcpu->arch.tec.tec = 0;
+ kfree(vcpu->arch.tec.tec_run);
+}
+
+static int tmi_check_version(void)
+{
+ uint64_t res;
+ int version_major;
+ int version_minor;
+
+ res = tmi_version();
+ if (res == SMCCC_RET_NOT_SUPPORTED) {
+ return -ENXIO;
+ }
+
+ version_major = TMI_ABI_VERSION_GET_MAJOR(res);
+ version_minor = TMI_ABI_VERSION_GET_MINOR(res);
+
+ if (version_major != TMI_ABI_VERSION_MAJOR) {
+ kvm_err("Unsupported TMI_ABI (version %d %d)\n", version_major,
+ version_minor);
+ return -ENXIO;
+ }
+
+ kvm_info("TMI ABI version %d,%d\n", version_major, version_minor);
+ return 0;
+}
+
+static int kvm_kick_boot_vcpu(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ unsigned long i;
+
+ if (READ_ONCE(kvm->arch.cvm.state) == CVM_STATE_ACTIVE) {
+ return 0;
+ }
+
+ mutex_lock(&kvm->lock);
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (i == 0) {
+ kvm_vcpu_kick(vcpu);
+ }
+ }
+ mutex_unlock(&kvm->lock);
+ return 0;
+}
+
+int kvm_arm_cvm_first_run(struct kvm_vcpu *vcpu)
+{
+ int ret = 0;
+
+ if (READ_ONCE(vcpu->kvm->arch.cvm.state) == CVM_STATE_ACTIVE) {
+ return ret;
+ }
+
+ if (vcpu->vcpu_id == 0) {
+ ret = kvm_create_all_tecs(vcpu->kvm);
+ if (ret != 0) {
+ return ret;
+ }
+ } else {
+ kvm_kick_boot_vcpu(vcpu->kvm);
+ }
+
+ mutex_lock(&vcpu->kvm->lock);
+
+ if (vcpu->vcpu_id == 0) {
+ ret = kvm_sel2_map_protected_ipa(vcpu);
+ if (ret) {
+ kvm_err("Map protected ipa failed!\n");
+ goto unlock_exit;
+ }
+ ret = tmi_cvm_activate(vcpu->kvm->arch.cvm.rd);
+ if (ret) {
+ kvm_err("tmi_cvm_activate failed!\n");
+ goto unlock_exit;
+ }
+
+ WRITE_ONCE(vcpu->kvm->arch.cvm.state, CVM_STATE_ACTIVE);
+ kvm_info("cVM%d is activated!\n", vcpu->kvm->arch.cvm.cvm_vmid);
+ }
+unlock_exit:
+ mutex_unlock(&vcpu->kvm->lock);
+
+ return ret;
+}
+
+int kvm_tec_enter(struct kvm_vcpu *vcpu)
+{
+	struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+	if (READ_ONCE(vcpu->kvm->arch.cvm.state) != CVM_STATE_ACTIVE)
+		return -EINVAL;
+
+ /* set/clear TWI TWE flags */
+ if (vcpu->arch.hcr_el2 & HCR_TWI) {
+ run->tec_entry.flags |= TEC_ENTRY_FLAG_TRAP_WFI;
+ } else {
+ run->tec_entry.flags &= ~TEC_ENTRY_FLAG_TRAP_WFI;
+ }
+
+ if (vcpu->arch.hcr_el2 & HCR_TWE) {
+ run->tec_entry.flags |= TEC_ENTRY_FLAG_TRAP_WFE;
+ } else {
+ run->tec_entry.flags &= ~TEC_ENTRY_FLAG_TRAP_WFE;
+ }
+
+ return tmi_tec_enter(vcpu->arch.tec.tec, __pa(run));
+}
+
+int cvm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target)
+{
+ int ret;
+ ret = tmi_psci_complete(calling->arch.tec.tec, target->arch.tec.tec);
+ if (ret) {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+#define SIMD_PAGE_SIZE (3 * PAGE_SIZE)
+int kvm_arch_tec_init(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.tec.tec_run = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
+ if (!vcpu->arch.tec.tec_run) {
+ return -ENOMEM;
+ }
+
+	vcpu->arch.tec.tec = tmi_mem_alloc(vcpu->kvm->arch.cvm.rd, NO_NUMA,
+					   TMM_MEM_TYPE_TEC, TMM_MEM_MAP_SIZE_MAX);
+ if (vcpu->arch.tec.tec == 0) {
+ kvm_info("KVM tmi_mem_alloc failed:%d\n", vcpu->vcpu_id);
+ return -ENOMEM;
+ }
+ kvm_info("KVM inits cVM VCPU:%d\n", vcpu->vcpu_id);
+
+ return 0;
+}
+
+int kvm_init_tmm(void)
+{
+ int ret;
+
+ if (PAGE_SIZE != SZ_4K) {
+ return 0;
+ }
+
+ if (tmi_check_version()) {
+ return 0;
+ }
+
+ ret = cvm_vmid_init();
+ if (ret) {
+ return ret;
+ }
+
+ tmm_feat_reg0 = tmi_features(0);
+ kvm_info("TMM feature0: 0x%lx\n", tmm_feat_reg0);
+
+ static_branch_enable(&kvm_cvm_is_available);
+
+ return 0;
+}
+
+int kvm_init_cvm_vm(struct kvm *kvm)
+{
+ struct tmi_cvm_params *params;
+
+	params = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
+ if (!params) {
+ return -ENOMEM;
+ }
+
+ kvm->arch.cvm.params = params;
+
+ return 0;
+}
+
+int kvm_load_user_data(struct kvm *kvm, unsigned long arg)
+{
+ struct kvm_user_data user_data;
+ void __user *argp = (void __user *)arg;
+
+ if (!kvm_is_cvm(kvm))
+ return -EFAULT;
+
+ if (copy_from_user(&user_data, argp, sizeof(user_data)))
+ return -EFAULT;
+
+ kvm->arch.cvm.loader_start = user_data.loader_start;
+ kvm->arch.cvm.initrd_start = user_data.initrd_start;
+ kvm->arch.cvm.initrd_size = user_data.initrd_size;
+ kvm->arch.cvm.ram_size = user_data.ram_size;
+ memcpy(&kvm->arch.cvm.numa_info, &user_data.numa_info, sizeof(struct kvm_numa_info));
+
+ return 0;
+}
+
+void kvm_cvm_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ kvm_timer_vcpu_put(vcpu);
+ kvm_vgic_put(vcpu);
+ vcpu->cpu = -1;
+}
+
+unsigned long cvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu,
+ unsigned long target_affinity, unsigned long lowest_affinity_level)
+{
+ struct kvm_vcpu *target_vcpu;
+
+ if (lowest_affinity_level != 0)
+ return PSCI_RET_INVALID_PARAMS;
+
+ target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, target_affinity);
+ if (!target_vcpu)
+ return PSCI_RET_INVALID_PARAMS;
+
+ cvm_psci_complete(vcpu, target_vcpu);
+ return PSCI_RET_SUCCESS;
+}
+
+int kvm_cvm_vcpu_set_events(struct kvm_vcpu *vcpu,
+ bool serror_pending, bool ext_dabt_pending)
+{
+	struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+	if (serror_pending)
+		return -EINVAL;
+
+	if (ext_dabt_pending) {
+		if (!(run->tec_entry.flags & TEC_ENTRY_FLAG_EMUL_MMIO))
+			return -EINVAL;
+
+		run->tec_entry.flags &= ~TEC_ENTRY_FLAG_EMUL_MMIO;
+		run->tec_entry.flags |= TEC_ENTRY_FLAG_INJECT_SEA;
+	}
+ return 0;
+}
diff --git a/arch/arm64/kvm/cvm_exit.c b/arch/arm64/kvm/cvm_exit.c
new file mode 100644
index 000000000..ba07a23be
--- /dev/null
+++ b/arch/arm64/kvm/cvm_exit.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+#include <linux/kvm_host.h>
+#include <kvm/arm_hypercalls.h>
+#include <kvm/arm_psci.h>
+
+#include <asm/kvm_tmi.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_coproc.h>
+
+typedef int (*exit_handler_fn)(struct kvm_vcpu *vcpu);
+
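+/*
+ * Fold the timer state reported by the TMM at TEC exit back into the
+ * vcpu's emulated timer registers, then re-evaluate the interrupt lines.
+ */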
+static void update_arch_timer_irq_lines(struct kvm_vcpu *vcpu, bool unmask_ctl)
+{
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+ __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = run->tec_exit.cntv_ctl;
+ __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = run->tec_exit.cntv_cval;
+ __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = run->tec_exit.cntp_ctl;
+ __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = run->tec_exit.cntp_cval;
+
+	/*
+	 * Because the timer mask is tainted by the TMM, we don't know the
+	 * true intent of the guest. Here, we assume the mask is always
+	 * cleared during WFI.
+	 */
+ if (unmask_ctl) {
+ __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) &= ~ARCH_TIMER_CTRL_IT_MASK;
+ __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) &= ~ARCH_TIMER_CTRL_IT_MASK;
+ }
+
+ kvm_cvm_timers_update(vcpu);
+}
+
+static int tec_exit_reason_notimpl(struct kvm_vcpu *vcpu)
+{
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+ pr_err("[vcpu %d] Unhandled exit reason from cvm (ESR: %#llx)\n",
+ vcpu->vcpu_id, run->tec_exit.esr);
+ return -ENXIO;
+}
+
+/*
+ * This follows the same flow as kvm_handle_wfx, except for the tracing
+ * and the PC update; the logic is duplicated here to avoid changing
+ * kvm_handle_wfx itself.
+ */
+static int tec_exit_wfx(struct kvm_vcpu *vcpu)
+{
+ u64 esr = kvm_vcpu_get_esr(vcpu);
+
+ if (esr & ESR_ELx_WFx_ISS_WFE) {
+ vcpu->stat.wfe_exit_stat++;
+ } else {
+ vcpu->stat.wfi_exit_stat++;
+ }
+
+ if (esr & ESR_ELx_WFx_ISS_WFxT) {
+ if (esr & ESR_ELx_WFx_ISS_RV) {
+ u64 val, now;
+
+ now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT);
+ val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
+
+ if (now >= val)
+ goto out;
+ } else {
+ /* Treat WFxT as WFx if RN is invalid */
+ esr &= ~ESR_ELx_WFx_ISS_WFxT;
+ }
+ }
+
+ if (esr & ESR_ELx_WFx_ISS_WFE) {
+ kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
+ } else {
+ vcpu->arch.pvsched.pv_unhalted = false;
+ if (esr & ESR_ELx_WFx_ISS_WFxT)
+ vcpu->arch.flags |= KVM_ARM64_WFIT;
+ kvm_vcpu_block(vcpu);
+ vcpu->arch.flags &= ~KVM_ARM64_WFIT;
+ kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+ }
+
+out:
+ return 1;
+}
+
+static int tec_exit_sys_reg(struct kvm_vcpu *vcpu)
+{
+ int ret;
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+ unsigned long esr = kvm_vcpu_get_esr(vcpu);
+ int rt = kvm_vcpu_sys_get_rt(vcpu);
+ bool is_write = !(esr & 1);
+
+ if (is_write) {
+ vcpu_set_reg(vcpu, rt, run->tec_exit.gprs[0]);
+ }
+
+ ret = kvm_handle_sys_reg(vcpu);
+
+ if (ret >= 0 && !is_write) {
+ run->tec_entry.gprs[0] = vcpu_get_reg(vcpu, rt);
+ }
+ return ret;
+}
+
+static int tec_exit_sync_dabt(struct kvm_vcpu *vcpu)
+{
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+ if (kvm_vcpu_dabt_iswrite(vcpu) && kvm_vcpu_dabt_isvalid(vcpu)) {
+ vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu),
+ run->tec_exit.gprs[0]);
+ }
+ return kvm_handle_guest_abort(vcpu);
+}
+
+static int tec_exit_sync_iabt(struct kvm_vcpu *vcpu)
+{
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+ pr_err("[vcpu %d] Unhandled instruction abort (ESR: %#llx).\n",
+ vcpu->vcpu_id, run->tec_exit.esr);
+
+ return -ENXIO;
+}
+
+static exit_handler_fn tec_exit_handlers[] = {
+ [0 ... ESR_ELx_EC_MAX] = tec_exit_reason_notimpl,
+ [ESR_ELx_EC_WFx] = tec_exit_wfx,
+ [ESR_ELx_EC_SYS64] = tec_exit_sys_reg,
+ [ESR_ELx_EC_DABT_LOW] = tec_exit_sync_dabt,
+ [ESR_ELx_EC_IABT_LOW] = tec_exit_sync_iabt
+};
+
+static int tec_exit_psci(struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+ for (i = 0; i < TEC_EXIT_NR_GPRS; ++i) {
+ vcpu_set_reg(vcpu, i, run->tec_exit.gprs[i]);
+ }
+
+ return kvm_psci_call(vcpu);
+}
+
+static int tec_exit_host_call(struct kvm_vcpu *vcpu)
+{
+ int ret, i;
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+ vcpu->stat.hvc_exit_stat++;
+
+ for (i = 0; i < TEC_EXIT_NR_GPRS; ++i) {
+ vcpu_set_reg(vcpu, i, run->tec_exit.gprs[i]);
+ }
+
+ ret = kvm_hvc_call_handler(vcpu);
+
+ if (ret < 0) {
+ vcpu_set_reg(vcpu, 0, ~0UL);
+ ret = 1;
+ }
+ for (i = 0; i < TEC_EXIT_NR_GPRS; ++i) {
+ run->tec_entry.gprs[i] = vcpu_get_reg(vcpu, i);
+ }
+
+ return ret;
+}
+
+/*
+ * Return > 0 to return to the guest, < 0 on error, or 0 (with exit_reason
+ * set) on a proper exit to userspace.
+ */
+int handle_cvm_exit(struct kvm_vcpu *vcpu, int tec_run_ret)
+{
+ unsigned long status;
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+ u8 esr_ec = ESR_ELx_EC(run->tec_exit.esr);
+ bool is_wfx;
+
+ status = TMI_RETURN_STATUS(tec_run_ret);
+
+ if (status == TMI_ERROR_CVM_POWEROFF) {
+ vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+ vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SHUTDOWN;
+ return 0;
+ }
+
+ if (status == TMI_ERROR_CVM_STATE) {
+ vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
+ return 0;
+ }
+
+ if (tec_run_ret) {
+ return -ENXIO;
+ }
+
+ vcpu->arch.fault.esr_el2 = run->tec_exit.esr;
+ vcpu->arch.fault.far_el2 = run->tec_exit.far;
+ vcpu->arch.fault.hpfar_el2 = run->tec_exit.hpfar;
+
+ is_wfx = (run->tec_exit.exit_reason == TMI_EXIT_SYNC) && (esr_ec == ESR_ELx_EC_WFx);
+ update_arch_timer_irq_lines(vcpu, is_wfx);
+
+ run->tec_entry.flags = 0;
+
+ switch (run->tec_exit.exit_reason) {
+ case TMI_EXIT_FIQ:
+ case TMI_EXIT_IRQ:
+ return 1;
+ case TMI_EXIT_PSCI:
+ return tec_exit_psci(vcpu);
+ case TMI_EXIT_SYNC:
+ return tec_exit_handlers[esr_ec](vcpu);
+ case TMI_EXIT_HOST_CALL:
+ return tec_exit_host_call(vcpu);
+ }
+
+ kvm_pr_unimpl("Unsupported exit reason : 0x%llx\n",
+ run->tec_exit.exit_reason);
+ return 0;
+}
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index f9c3dbc99..ecdd35527 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -27,6 +27,10 @@
#include <asm/kvm_coproc.h>
#include <asm/sigcontext.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
+
#include "trace.h"
struct kvm_stats_debugfs_item debugfs_entries[] = {
@@ -818,6 +822,10 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
bool has_esr = events->exception.serror_has_esr;
bool ext_dabt_pending = events->exception.ext_dabt_pending;
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ return kvm_cvm_vcpu_set_events(vcpu, serror_pending, ext_dabt_pending);
+#endif
if (serror_pending && has_esr) {
if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
return -EINVAL;
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 452f4cacd..54d541767 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -229,6 +229,25 @@ void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
}
}
+#ifdef CONFIG_CVM_HOST
+void __vgic_v3_restore_tec_state(struct vgic_v3_cpu_if *cpu_if,
+ u64 *entry_hcr,
+ u64 *entry_lrs)
+{
+ u64 used_lrs = cpu_if->used_lrs;
+ int i;
+
+ *entry_hcr = cpu_if->vgic_hcr;
+ for (i = 0; i < kvm_vgic_global_state.nr_lr; ++i) {
+ if (i < used_lrs) {
+ entry_lrs[i] = cpu_if->vgic_lr[i];
+ } else {
+ entry_lrs[i] = 0;
+ }
+ }
+}
+#endif
+
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if)
{
u64 used_lrs = cpu_if->used_lrs;
diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c
index 6a2826f1b..6fc49784b 100644
--- a/arch/arm64/kvm/mmio.c
+++ b/arch/arm64/kvm/mmio.c
@@ -8,6 +8,10 @@
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
+
#include "trace.h"
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
@@ -109,6 +113,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
&data);
data = vcpu_data_host_to_guest(vcpu, data, len);
vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
+#ifdef CONFIG_CVM_HOST
+	if (vcpu_is_tec(vcpu)) {
+		struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+		run->tec_entry.gprs[0] = data;
+	}
+#endif
}
/*
@@ -177,7 +187,12 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
run->mmio.phys_addr = fault_ipa;
run->mmio.len = len;
vcpu->mmio_needed = 1;
-
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)->tec_entry.flags |=
+ TEC_ENTRY_FLAG_EMUL_MMIO;
+ }
+#endif
if (!ret) {
/* We handled the access successfully in the kernel. */
if (!is_write)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 6fa92a143..30426f6ad 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -474,6 +474,7 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
spin_lock(&kvm->mmu_lock);
pgt = mmu->pgt;
+
if (pgt) {
mmu->pgd_phys = 0;
mmu->pgt = NULL;
@@ -790,6 +791,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level);
write_fault = kvm_is_write_fault(vcpu);
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ write_fault = true;
+ prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;
+ }
+#endif
exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
VM_BUG_ON(write_fault && exec_fault);
vcpu->stat.mabt_exit_stat++;
diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
index 32bb26be8..0160ee8d6 100644
--- a/arch/arm64/kvm/psci.c
+++ b/arch/arm64/kvm/psci.c
@@ -16,6 +16,9 @@
#include <kvm/arm_psci.h>
#include <kvm/arm_hypercalls.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
/*
* This is an implementation of the Power State Coordination Interface
* as described in ARM document number ARM DEN 0022A.
@@ -78,6 +81,10 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
*/
if (!vcpu)
return PSCI_RET_INVALID_PARAMS;
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ cvm_psci_complete(source_vcpu, vcpu);
+#endif
if (!vcpu->arch.power_off) {
if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
return PSCI_RET_ALREADY_ON;
@@ -133,7 +140,10 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
/* Ignore other bits of target affinity */
target_affinity &= target_affinity_mask;
-
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ return cvm_psci_vcpu_affinity_info(vcpu, target_affinity, lowest_affinity_level);
+#endif
/*
* If one or more VCPU matching target affinity are running
* then ON else OFF
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 51f4c5e85..bb177d58c 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -30,6 +30,9 @@
#include <asm/kvm_mmu.h>
#include <asm/virt.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
/* Maximum phys_shift supported for any VM on this host */
static u32 kvm_ipa_limit;
@@ -199,6 +202,9 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
kfree(vcpu->arch.sve_state);
+#ifdef CONFIG_CVM_HOST
+ kvm_destroy_tec(vcpu);
+#endif
}
static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
@@ -433,7 +439,11 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
u32 parange, phys_shift;
u8 lvls, pbha = 0xf;
+#ifdef CONFIG_CVM_HOST
+ if ((type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK) && (!kvm_is_cvm(kvm)))
+#else
if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
+#endif
return -EINVAL;
phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
diff --git a/arch/arm64/kvm/tmi.c b/arch/arm64/kvm/tmi.c
new file mode 100644
index 000000000..6eb5dbd97
--- /dev/null
+++ b/arch/arm64/kvm/tmi.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+#include <linux/arm-smccc.h>
+#include <asm/kvm_tmi.h>
+
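+/*
+ * Thin wrappers around the TMI SMC calls. The TMI ABI returns its
+ * status/result in x1, hence every wrapper returns res.a1.
+ */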
+u64 tmi_version(void)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_VESION, &res);
+ return res.a1;
+}
+
+u64 tmi_data_create(u64 data, u64 rd, u64 map_addr, u64 src, u64 level)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_DATA_CREATE, data, rd, map_addr, src, level, &res);
+ return res.a1;
+}
+
+u64 tmi_data_destroy(u64 rd, u64 map_addr, u64 level)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_DATA_DESTROY, rd, map_addr, level, &res);
+ return res.a1;
+}
+
+u64 tmi_cvm_activate(u64 rd)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_CVM_ACTIVATE, rd, &res);
+ return res.a1;
+}
+
+u64 tmi_cvm_create(u64 rd, u64 params_ptr)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_CVM_CREATE, rd, params_ptr, &res);
+ return res.a1;
+}
+
+u64 tmi_cvm_destroy(u64 rd)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_CVM_DESTROY, rd, &res);
+ return res.a1;
+}
+
+u64 tmi_tec_create(u64 tec, u64 rd, u64 mpidr, u64 params_ptr)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TEC_CREATE, tec, rd, mpidr, params_ptr, &res);
+ return res.a1;
+}
+
+u64 tmi_tec_destroy(u64 tec)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TEC_DESTROY, tec, &res);
+ return res.a1;
+}
+
+u64 tmi_tec_enter(u64 tec, u64 run_ptr)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TEC_ENTER, tec, run_ptr, &res);
+ return res.a1;
+}
+
+u64 tmi_ttt_create(u64 ttt, u64 rd, u64 map_addr, u64 level)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TTT_CREATE, ttt, rd, map_addr, level, &res);
+ return res.a1;
+}
+
+u64 tmi_ttt_destroy(u64 ttt, u64 rd, u64 map_addr, u64 level)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TTT_DESTROY, ttt, rd, map_addr, level, &res);
+ return res.a1;
+}
+
+u64 tmi_ttt_map_unprotected(u64 rd, u64 map_addr, u64 level, u64 ttte)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TTT_MAP_UNPROTECTED, rd, map_addr, level, ttte, &res);
+ return res.a1;
+}
+
+u64 tmi_ttt_unmap_unprotected(u64 rd, u64 map_addr, u64 level, u64 ns)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TTT_UNMAP_UNPROTECTED, rd, map_addr, level, ns, &res);
+ return res.a1;
+}
+
+u64 tmi_ttt_unmap_protected(u64 rd, u64 map_addr, u64 level)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TTT_UNMAP_PROTECTED, rd, map_addr, level, &res);
+ return res.a1;
+}
+
+u64 tmi_psci_complete(u64 calling_tec, u64 target_tec)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_PSCI_COMPLETE, calling_tec, target_tec, &res);
+ return res.a1;
+}
+
+u64 tmi_features(u64 index)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_FEATURES, index, &res);
+ return res.a1;
+}
+
+u64 tmi_mem_alloc(u64 rd, u64 numa_id, enum tmi_tmm_mem_type tmm_mem_type,
+ enum tmi_tmm_map_size tmm_map_size)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_MEM_ALLOC, rd, numa_id, tmm_mem_type, tmm_map_size, &res);
+ return res.a1;
+}
+
+u64 tmi_mem_free(u64 pa, u64 numa_id, enum tmi_tmm_mem_type tmm_mem_type,
+ enum tmi_tmm_map_size tmm_map_size)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_MEM_FREE, pa, numa_id, tmm_mem_type, tmm_map_size, &res);
+ return res.a1;
+}
+
+u64 tmi_ttt_map_range(u64 rd, u64 map_addr, u64 size, u64 cur_node, u64 target_node)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TTT_MAP_RANGE, rd, map_addr, size, cur_node, target_node, &res);
+ return res.a1;
+}
+
+u64 tmi_ttt_unmap_range(u64 rd, u64 map_addr, u64 size, u64 node_id)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TTT_UNMAP_RANGE, rd, map_addr, size, node_id, &res);
+ return res.a1;
+}
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 213afce81..c80730152 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -10,6 +10,10 @@
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
+
#include "vgic.h"
static bool group0_trap;
@@ -674,7 +678,10 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
kvm_vgic_global_state.vcpu_base = 0;
} else {
kvm_vgic_global_state.vcpu_base = info->vcpu.start;
- kvm_vgic_global_state.can_emulate_gicv2 = true;
+#ifdef CONFIG_CVM_HOST
+ if (!static_branch_unlikely(&kvm_cvm_is_available))
+#endif
+ kvm_vgic_global_state.can_emulate_gicv2 = true;
ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
if (ret) {
kvm_err("Cannot register GICv2 KVM device.\n");
@@ -735,7 +742,12 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
-
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+		cpu_if->vgic_vmcr =
+			((struct tmi_tec_run *)vcpu->arch.tec.tec_run)->tec_exit.gicv3_vmcr;
+ return;
+ }
+#endif
if (likely(cpu_if->vgic_sre))
cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
}
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index 116aa91d5..ef9ffea7a 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -12,6 +12,10 @@
#include <asm/kvm_hyp.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
+
#include "vgic.h"
#define CREATE_TRACE_POINTS
@@ -872,11 +876,42 @@ static inline bool can_access_vgic_from_kernel(void)
return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
}
+#ifdef CONFIG_CVM_HOST
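+/*
+ * For a TEC the host never touches the list registers directly: the TMM
+ * shuttles them in and out through the shared tec_run page on every
+ * entry/exit, so save/restore just copies to and from that page.
+ */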
+static inline void vgic_tmm_save_state(struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+ struct tmi_tec_run *tec_run = vcpu->arch.tec.tec_run;
+
+ for (i = 0; i < kvm_vgic_global_state.nr_lr; ++i) {
+ cpu_if->vgic_lr[i] = tec_run->tec_exit.gicv3_lrs[i];
+ tec_run->tec_entry.gicv3_lrs[i] = 0;
+ }
+}
+
+static inline void vgic_tmm_restore_state(struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+ struct tmi_tec_run *tec_run = vcpu->arch.tec.tec_run;
+
+ for (i = 0; i < kvm_vgic_global_state.nr_lr; ++i) {
+ tec_run->tec_entry.gicv3_lrs[i] = cpu_if->vgic_lr[i];
+ tec_run->tec_exit.gicv3_lrs[i] = cpu_if->vgic_lr[i];
+ }
+}
+#endif
+
static inline void vgic_save_state(struct kvm_vcpu *vcpu)
{
if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
vgic_v2_save_state(vcpu);
else
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ vgic_tmm_save_state(vcpu);
+ else
+#endif
__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
}
@@ -907,6 +942,13 @@ static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
vgic_v2_restore_state(vcpu);
else
+#ifdef CONFIG_CVM_HOST
+	if (vcpu_is_tec(vcpu)) {
+		vgic_tmm_restore_state(vcpu);
+		return;
+	} else
+#endif
__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
}
@@ -948,7 +990,10 @@ void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
if (unlikely(!vgic_initialized(vcpu->kvm)))
return;
-
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ return;
+#endif
if (kvm_vgic_global_state.type == VGIC_V2)
vgic_v2_load(vcpu);
else
@@ -959,7 +1004,10 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
if (unlikely(!vgic_initialized(vcpu->kvm)))
return;
-
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ return;
+#endif
if (kvm_vgic_global_state.type == VGIC_V2)
vgic_v2_put(vcpu);
else
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 413d6f9bc..18ccd16fc 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -117,4 +117,8 @@ void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
u32 timer_get_ctl(struct arch_timer_context *ctxt);
u64 timer_get_cval(struct arch_timer_context *ctxt);
+#ifdef CONFIG_CVM_HOST
+/* Needed for S-EL2 */
+void kvm_cvm_timers_update(struct kvm_vcpu *vcpu);
+#endif
#endif
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 595c9da4f..1cb861d6c 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -345,6 +345,27 @@ struct kvm_vcpu {
struct kvm_vcpu_arch arch;
};
+#ifdef CONFIG_CVM_HOST
+#define KVM_TYPE_CVM_BIT 8
+#define CVM_MAX_HALT_POLL_NS 100000
+
+DECLARE_STATIC_KEY_FALSE(kvm_cvm_is_available);
+
+static __always_inline bool vcpu_is_tec(struct kvm_vcpu *vcpu)
+{
+ if (static_branch_unlikely(&kvm_cvm_is_available)) {
+ return vcpu->arch.tec.tec_run;
+ }
+ return false;
+}
+
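+/* True when userspace set the cVM type bit in the KVM_CREATE_VM argument. */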
+static inline bool kvm_arm_cvm_type(unsigned long type)
+{
+ return type & (1UL << KVM_TYPE_CVM_BIT);
+}
+
+#endif
+
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
/*
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index a1d8b1184..3332ee9ed 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1373,6 +1373,35 @@ struct kvm_master_dev_info {
#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
#define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
+#ifdef CONFIG_CVM_HOST
+#define KVM_LOAD_USER_DATA _IOW(KVMIO, 0x49, struct kvm_user_data)
+
+#define KVM_CAP_ARM_TMM 300 /* FIXME: Large number to prevent conflicts */
+#define MAX_NUMA_NODE 8
+#define MAX_CPU_BIT_MAP 0
+
+struct kvm_numa_node {
+ __u64 numa_id;
+ __u64 ipa_start;
+ __u64 ipa_size;
+ int64_t host_numa_node;
+ __u64 cpu_id[MAX_CPU_BIT_MAP];
+};
+
+struct kvm_numa_info {
+ __u64 numa_cnt;
+ struct kvm_numa_node numa_nodes[MAX_NUMA_NODE];
+};
+
+struct kvm_user_data {
+ __u64 loader_start;
+ __u64 initrd_start;
+ __u64 initrd_size;
+ __u64 ram_size;
+ struct kvm_numa_info numa_info;
+};
+#endif
+
/* enable ucontrol for s390 */
struct kvm_s390_ucas_mapping {
__u64 user_addr;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9166ef044..ef9f6d9df 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1089,7 +1089,12 @@ static struct kvm *kvm_create_vm(unsigned long type)
goto out_err_no_arch_destroy_vm;
}
- kvm->max_halt_poll_ns = halt_poll_ns;
+#ifdef CONFIG_CVM_HOST
+ if (kvm_arm_cvm_type(type))
+ kvm->max_halt_poll_ns = CVM_MAX_HALT_POLL_NS;
+ else
+#endif
+ kvm->max_halt_poll_ns = halt_poll_ns;
r = kvm_arch_init_vm(kvm, type);
if (r)
--
2.33.0
From: Jingxian He <hejingxian(a)huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CC0X
CVE: N/A
------------
Add host support for Confidential VMs:
1. Add a new kvm_type for cVMs.
2. Initialize cVM-related data when userspace creates a VM of cVM type.
3. Add support for the cVM hypervisor (TMM), which runs at S-EL2.
4. Have KVM call the TMI interface to create the cVM stage-2 page table
   and run the cVM.
Signed-off-by: Jingxian He <hejingxian(a)huawei.com>
---
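A minimal userspace sketch of the intended flow, for context. This is not
part of the patch: KVM_LOAD_USER_DATA, struct kvm_user_data and the cVM
type bit are taken from the uapi changes below; KVM_CAP_ARM_TMM_CONFIG_CVM_HOST
and KVM_CAP_ARM_TMM_CREATE_CVM are assumed to be exported by this series'
headers; the call ordering, IPA size and all addresses are assumptions, and
error handling is omitted.

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int create_cvm_vm(void)
	{
		int kvm_fd = open("/dev/kvm", O_RDWR);
		/* Bit 8 (KVM_TYPE_CVM_BIT) requests a cVM; low bits pick the IPA size. */
		int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 40UL | (1UL << 8));

		/* Describe the guest image; all addresses here are placeholders. */
		struct kvm_user_data data = {
			.loader_start = 0x80000000,
			.initrd_start = 0x84000000,
			.initrd_size  = 0x01000000,
			.ram_size     = 0x40000000,
		};
		ioctl(vm_fd, KVM_LOAD_USER_DATA, &data);

		/* Optional SVE/PMU setup would go here via KVM_ENABLE_CAP with
		 * args[0] = KVM_CAP_ARM_TMM_CONFIG_CVM_HOST. */

		/* Seal the cVM; must happen while it is still in CVM_STATE_NONE. */
		struct kvm_enable_cap ecap = { .cap = KVM_CAP_ARM_TMM };
		ecap.args[0] = KVM_CAP_ARM_TMM_CREATE_CVM;
		ioctl(vm_fd, KVM_ENABLE_CAP, &ecap);

		/* vcpus are created as usual; TEC creation and activation happen
		 * on the boot vcpu's first KVM_RUN (kvm_arm_cvm_first_run). */
		return vm_fd;
	}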
arch/arm64/configs/defconfig | 1 +
arch/arm64/configs/openeuler_defconfig | 1 +
arch/arm64/include/asm/kvm_emulate.h | 14 +
arch/arm64/include/asm/kvm_host.h | 12 +
arch/arm64/include/asm/kvm_tmi.h | 373 +++++++++++
arch/arm64/include/asm/kvm_tmm.h | 72 +++
arch/arm64/kvm/Kconfig | 8 +
arch/arm64/kvm/Makefile | 4 +
arch/arm64/kvm/arch_timer.c | 104 +++-
arch/arm64/kvm/arm.c | 131 +++-
arch/arm64/kvm/cvm.c | 824 +++++++++++++++++++++++++
arch/arm64/kvm/cvm_exit.c | 229 +++++++
arch/arm64/kvm/guest.c | 8 +
arch/arm64/kvm/hyp/vgic-v3-sr.c | 19 +
arch/arm64/kvm/mmio.c | 17 +-
arch/arm64/kvm/mmu.c | 7 +
arch/arm64/kvm/psci.c | 12 +-
arch/arm64/kvm/reset.c | 10 +
arch/arm64/kvm/tmi.c | 148 +++++
arch/arm64/kvm/vgic/vgic-v3.c | 16 +-
arch/arm64/kvm/vgic/vgic.c | 52 +-
include/kvm/arm_arch_timer.h | 4 +
include/linux/kvm_host.h | 21 +
include/uapi/linux/kvm.h | 29 +
virt/kvm/kvm_main.c | 7 +-
25 files changed, 2105 insertions(+), 18 deletions(-)
create mode 100644 arch/arm64/include/asm/kvm_tmi.h
create mode 100644 arch/arm64/include/asm/kvm_tmm.h
create mode 100644 arch/arm64/kvm/cvm.c
create mode 100644 arch/arm64/kvm/cvm_exit.c
create mode 100644 arch/arm64/kvm/tmi.c
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index d025bafcc..ace2bf4ad 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -110,6 +110,7 @@ CONFIG_ACPI_APEI_MEMORY_FAILURE=y
CONFIG_ACPI_APEI_EINJ=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=y
+CONFIG_CVM_HOST=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 5ad5e4378..e298ca7e5 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -762,6 +762,7 @@ CONFIG_ACPI_PPTT=y
CONFIG_IRQ_BYPASS_MANAGER=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=y
+CONFIG_CVM_HOST=y
CONFIG_HAVE_KVM_IRQCHIP=y
CONFIG_HAVE_KVM_IRQFD=y
CONFIG_HAVE_KVM_IRQ_ROUTING=y
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index fb3e3f613..ab1aebd1f 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -556,4 +556,18 @@ static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
return test_bit(feature, vcpu->arch.features);
}
+#ifdef CONFIG_CVM_HOST
+static inline bool kvm_is_cvm(struct kvm *kvm)
+{
+ if (static_branch_unlikely(&kvm_cvm_is_available)) {
+ return kvm->arch.is_cvm;
+ }
+ return false;
+}
+
+static inline enum cvm_state kvm_cvm_state(struct kvm *kvm)
+{
+ return READ_ONCE(kvm->arch.cvm.state);
+}
+#endif
#endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 8bb67dfb9..01b8f9331 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -26,6 +26,9 @@
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/thread_info.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmm.h>
+#endif
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -127,6 +130,11 @@ struct kvm_arch {
cpumask_t *dvm_cpumask; /* Union of all vcpu's cpus_ptr */
u64 lsudvmbm_el2;
#endif
+
+#ifdef CONFIG_CVM_HOST
+ struct cvm cvm;
+ bool is_cvm;
+#endif
};
struct kvm_vcpu_fault_info {
@@ -405,6 +413,10 @@ struct kvm_vcpu_arch {
cpumask_t *cpus_ptr;
cpumask_t *pre_cpus_ptr;
#endif
+
+#ifdef CONFIG_CVM_HOST
+ struct cvm_tec tec;
+#endif
};
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
diff --git a/arch/arm64/include/asm/kvm_tmi.h b/arch/arm64/include/asm/kvm_tmi.h
new file mode 100644
index 000000000..554b3e439
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_tmi.h
@@ -0,0 +1,373 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+#ifndef __TMM_TMI_H
+#define __TMM_TMI_H
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_pgtable.h>
+#include <linux/virtio_ring.h>
+
+#define GRANULE_SIZE 4096
+
+#define NO_NUMA -1
+
+#define TMM_TTT_LEVEL_3 3
+
+#ifdef CONFIG_CVM_HOST_FVP_PLAT
+#define CVM_MEM_BASE ULL(0x8800000000) /* choose FVP platform to run cVM */
+#define VQ_NUM 3
+#else
+#define CVM_MEM_BASE ULL(0x800000000) /* choose qemu platform to run cVM */
+#define VQ_NUM 3
+#endif
+
+#define MEM_SEG_NUMS 2
+
+/* define in QEMU hw/arm/virt.c */
+#define VIRT_PCIE_MMIO 0x10000000 /* 256MB */
+#define VIRT_PCIE_MMIO_SIZE 0x1000000 /* 16MB */
+#define VIRT_HIGH_PCIE_ECAM 0x8000000000 /* 512GB */
+#define VIRT_HIGH_PCIE_ECAM_SIZE 0x12000000 /* 288MB */
+
+/* TMI error codes. */
+#define TMI_SUCCESS 0
+#define TMI_ERROR_INPUT 1
+#define TMI_ERROR_MEMORY 2
+#define TMI_ERROR_ALIAS 3
+#define TMI_ERROR_IN_USE 4
+#define TMI_ERROR_CVM_STATE 5
+#define TMI_ERROR_OWNER 6
+#define TMI_ERROR_TEC 7
+#define TMI_ERROR_TTT_WALK 8
+#define TMI_ERROR_TTT_ENTRY 9
+#define TMI_ERROR_NOT_SUPPORTED 10
+#define TMI_ERROR_INTERNAL 11
+#define TMI_ERROR_CVM_POWEROFF 12
+
+#define TMI_RETURN_STATUS(ret) ((ret) & 0xFF)
+#define TMI_RETURN_INDEX(ret) (((ret) >> 8) & 0xFF)
+
+#define TMI_FEATURE_REGISTER_0_S2SZ GENMASK(7, 0)
+#define TMI_FEATURE_REGISTER_0_LPA2 BIT(8)
+#define TMI_FEATURE_REGISTER_0_SVE_EN BIT(9)
+#define TMI_FEATURE_REGISTER_0_SVE_VL GENMASK(13, 10)
+#define TMI_FEATURE_REGISTER_0_NUM_BPS GENMASK(17, 14)
+#define TMI_FEATURE_REGISTER_0_NUM_WPS GENMASK(21, 18)
+#define TMI_FEATURE_REGISTER_0_PMU_EN BIT(22)
+#define TMI_FEATURE_REGISTER_0_PMU_NUM_CTRS GENMASK(27, 23)
+#define TMI_FEATURE_REGISTER_0_HASH_SHA_256 BIT(28)
+#define TMI_FEATURE_REGISTER_0_HASH_SHA_512 BIT(29)
+
+#define TMI_CVM_PARAM_FLAG_LPA2 BIT(0)
+#define TMI_CVM_PARAM_FLAG_SVE BIT(1)
+#define TMI_CVM_PARAM_FLAG_PMU BIT(2)
+
+/*
+ * Many of these fields are smaller than u64 but all fields have u64
+ * alignment, so use u64 to ensure correct alignment.
+ */
+typedef struct tmi_cvm_params {
+ u64 flags;
+ u64 s2sz;
+ u64 sve_vl;
+ u64 num_bps;
+ u64 num_wps;
+ u64 pmu_num_cnts;
+ u64 measurement_algo;
+ u64 vmid;
+ u64 ns_vtcr;
+ u64 vttbr_el2;
+ u64 ttt_base;
+ s64 ttt_level_start;
+ u64 ttt_num_start;
+ u8 rpv[64]; /* 512 bits */
+} tmi_cvm_params_t;
+
+#define TMI_NOT_RUNNABLE 0
+#define TMI_RUNNABLE 1
+
+/*
+ * The number of GPRs (starting from X0) that are
+ * configured by the host when a TEC is created.
+ */
+#define TEC_CREATE_NR_GPRS (8U)
+
+struct tmi_tec_params {
+ uint64_t gprs[TEC_CREATE_NR_GPRS];
+ uint64_t pc;
+ uint64_t flags;
+ uint64_t ram_size;
+};
+
+#define TEC_ENTRY_FLAG_EMUL_MMIO (1UL << 0U)
+#define TEC_ENTRY_FLAG_INJECT_SEA (1UL << 1U)
+#define TEC_ENTRY_FLAG_TRAP_WFI (1UL << 2U)
+#define TEC_ENTRY_FLAG_TRAP_WFE (1UL << 3U)
+
+#define TMI_EXIT_SYNC 0
+#define TMI_EXIT_IRQ 1
+#define TMI_EXIT_FIQ 2
+#define TMI_EXIT_PSCI 3
+#define TMI_EXIT_HOST_CALL 5
+#define TMI_EXIT_SERROR 6
+
+/*
+ * The number of GPRs (starting from X0) per voluntary exit context.
+ * Per SMCCC.
+ */
+#define TEC_EXIT_NR_GPRS (31U)
+
+/* Maximum number of Interrupt Controller List Registers. */
+#define TEC_GIC_NUM_LRS (16U)
+
+struct tmi_tec_entry {
+ uint64_t flags;
+ uint64_t gprs[TEC_EXIT_NR_GPRS];
+ uint64_t gicv3_lrs[TEC_GIC_NUM_LRS];
+ uint64_t gicv3_hcr;
+};
+
+struct tmi_tec_exit {
+ uint64_t exit_reason;
+ uint64_t esr;
+ uint64_t far;
+ uint64_t hpfar;
+ uint64_t gprs[TEC_EXIT_NR_GPRS];
+ uint64_t gicv3_hcr;
+ uint64_t gicv3_lrs[TEC_GIC_NUM_LRS];
+ uint64_t gicv3_misr;
+ uint64_t gicv3_vmcr;
+ uint64_t cntv_ctl;
+ uint64_t cntv_cval;
+ uint64_t cntp_ctl;
+ uint64_t cntp_cval;
+ uint64_t imm;
+};
+
+struct tmi_tec_run {
+ struct tmi_tec_entry tec_entry;
+ struct tmi_tec_exit tec_exit;
+};
+
+#define TMI_FNUM_MIN_VALUE U(0x150)
+#define TMI_FNUM_MAX_VALUE U(0x18F)
+
+/******************************************************************************
+ * Bit definitions inside the function id as per the SMC calling convention
+ ******************************************************************************/
+#define FUNCID_TYPE_SHIFT 31
+#define FUNCID_CC_SHIFT 30
+#define FUNCID_OEN_SHIFT 24
+#define FUNCID_NUM_SHIFT 0
+
+#define FUNCID_TYPE_MASK 0x1
+#define FUNCID_CC_MASK 0x1
+#define FUNCID_OEN_MASK 0x3f
+#define FUNCID_NUM_MASK 0xffff
+
+#define FUNCID_TYPE_WIDTH 1
+#define FUNCID_CC_WIDTH 1
+#define FUNCID_OEN_WIDTH 6
+#define FUNCID_NUM_WIDTH 16
+
+#define SMC_64 1
+#define SMC_32 0
+#define SMC_TYPE_FAST 1
+#define SMC_TYPE_STD 0
+
+/*****************************************************************************
+ * Owning entity number definitions inside the function id as per the SMC
+ * calling convention
+ *****************************************************************************/
+#define OEN_ARM_START 0
+#define OEN_ARM_END 0
+#define OEN_CPU_START 1
+#define OEN_CPU_END 1
+#define OEN_SIP_START 2
+#define OEN_SIP_END 2
+#define OEN_OEM_START 3
+#define OEN_OEM_END 3
+#define OEN_STD_START 4 /* Standard Calls */
+#define OEN_STD_END 4
+#define OEN_TAP_START 48 /* Trusted Applications */
+#define OEN_TAP_END 49
+#define OEN_TOS_START 50 /* Trusted OS */
+#define OEN_TOS_END 63
+#define OEN_LIMIT 64
+
+/* Get TMI fastcall std FID from function number */
+#define TMI_FID(smc_cc, func_num) \
+ ((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) | \
+ ((smc_cc) << FUNCID_CC_SHIFT) | \
+ (OEN_STD_START << FUNCID_OEN_SHIFT) | \
+ ((func_num) << FUNCID_NUM_SHIFT))
+
+#define U(_x) (_x##U)
+
+/*
+ * SMC_TMM_INIT_COMPLETE is the only function in the TMI that originates from
+ * the CVM world and is handled by the SPMD. The remaining functions are
+ * always invoked by the Normal world, forwarded by the SPMD and handled by the
+ * TMM.
+ */
+#define TMI_FNUM_VERSION U(0x260)
+#define TMI_FNUM_MEM_ALLOC U(0x261)
+#define TMI_FNUM_MEM_FREE U(0x262)
+#define TMI_FNUM_DATA_CREATE U(0x263)
+#define TMI_FNUM_DATA_DESTROY U(0x265)
+#define TMI_FNUM_CVM_ACTIVATE U(0x267)
+#define TMI_FNUM_CVM_CREATE U(0x268)
+#define TMI_FNUM_CVM_DESTROY U(0x269)
+#define TMI_FNUM_TEC_CREATE U(0x27A)
+#define TMI_FNUM_TEC_DESTROY U(0x27B)
+#define TMI_FNUM_TEC_ENTER U(0x27C)
+#define TMI_FNUM_TTT_CREATE U(0x27D)
+#define TMI_FNUM_TTT_DESTROY U(0x27E)
+#define TMI_FNUM_TTT_MAP_UNPROTECTED U(0x27F)
+#define TMI_FNUM_TTT_MAP_PROTECTED U(0x280)
+#define TMI_FNUM_TTT_UNMAP_UNPROTECTED U(0x282)
+#define TMI_FNUM_TTT_UNMAP_PROTECTED U(0x283)
+#define TMI_FNUM_PSCI_COMPLETE U(0x284)
+#define TMI_FNUM_FEATURES U(0x285)
+#define TMI_FNUM_TTT_MAP_RANGE U(0x286)
+#define TMI_FNUM_TTT_UNMAP_RANGE U(0x287)
+
+/* TMI SMC64 FIDs handled by the SPMD */
+#define TMI_TMM_VERSION TMI_FID(SMC_64, TMI_FNUM_VERSION)
+#define TMI_TMM_DATA_CREATE TMI_FID(SMC_64, TMI_FNUM_DATA_CREATE)
+#define TMI_TMM_DATA_DESTROY TMI_FID(SMC_64, TMI_FNUM_DATA_DESTROY)
+#define TMI_TMM_CVM_ACTIVATE TMI_FID(SMC_64, TMI_FNUM_CVM_ACTIVATE)
+#define TMI_TMM_CVM_CREATE TMI_FID(SMC_64, TMI_FNUM_CVM_CREATE)
+#define TMI_TMM_CVM_DESTROY TMI_FID(SMC_64, TMI_FNUM_CVM_DESTROY)
+#define TMI_TMM_TEC_CREATE TMI_FID(SMC_64, TMI_FNUM_TEC_CREATE)
+#define TMI_TMM_TEC_DESTROY TMI_FID(SMC_64, TMI_FNUM_TEC_DESTROY)
+#define TMI_TMM_TEC_ENTER TMI_FID(SMC_64, TMI_FNUM_TEC_ENTER)
+#define TMI_TMM_TTT_CREATE TMI_FID(SMC_64, TMI_FNUM_TTT_CREATE)
+#define TMI_TMM_TTT_DESTROY TMI_FID(SMC_64, TMI_FNUM_TTT_DESTROY)
+#define TMI_TMM_TTT_MAP_UNPROTECTED TMI_FID(SMC_64, TMI_FNUM_TTT_MAP_UNPROTECTED)
+#define TMI_TMM_TTT_MAP_PROTECTED TMI_FID(SMC_64, TMI_FNUM_TTT_MAP_PROTECTED)
+#define TMI_TMM_TTT_UNMAP_UNPROTECTED TMI_FID(SMC_64, TMI_FNUM_TTT_UNMAP_UNPROTECTED)
+#define TMI_TMM_TTT_UNMAP_PROTECTED TMI_FID(SMC_64, TMI_FNUM_TTT_UNMAP_PROTECTED)
+#define TMI_TMM_PSCI_COMPLETE TMI_FID(SMC_64, TMI_FNUM_PSCI_COMPLETE)
+#define TMI_TMM_FEATURES TMI_FID(SMC_64, TMI_FNUM_FEATURES)
+#define TMI_TMM_MEM_ALLOC TMI_FID(SMC_64, TMI_FNUM_MEM_ALLOC)
+#define TMI_TMM_MEM_FREE TMI_FID(SMC_64, TMI_FNUM_MEM_FREE)
+#define TMI_TMM_TTT_MAP_RANGE TMI_FID(SMC_64, TMI_FNUM_TTT_MAP_RANGE)
+#define TMI_TMM_TTT_UNMAP_RANGE TMI_FID(SMC_64, TMI_FNUM_TTT_UNMAP_RANGE)
+
+#define TMI_ABI_VERSION_GET_MAJOR(_version) ((_version) >> 16)
+#define TMI_ABI_VERSION_GET_MINOR(_version) ((_version) & 0xFFFF)
+
+#define TMI_ABI_VERSION_MAJOR U(0x0)
+
+/* KVM_CAP_ARM_TMM on VM fd */
+#define KVM_CAP_ARM_TMM_CONFIG_CVM_HOST 0
+#define KVM_CAP_ARM_TMM_CREATE_CVM 1
+#define KVM_CAP_ARM_TMM_INIT_IPA_CVM 2
+#define KVM_CAP_ARM_TMM_POPULATE_CVM 3
+#define KVM_CAP_ARM_TMM_ACTIVATE_CVM 4
+
+#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA256 0
+#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA512 1
+
+#define KVM_CAP_ARM_TMM_RPV_SIZE 64
+
+/* List of configuration items accepted for KVM_CAP_ARM_TMM_CONFIG_CVM_HOST */
+#define KVM_CAP_ARM_TMM_CFG_RPV 0
+#define KVM_CAP_ARM_TMM_CFG_HASH_ALGO 1
+#define KVM_CAP_ARM_TMM_CFG_SVE 2
+#define KVM_CAP_ARM_TMM_CFG_DBG 3
+#define KVM_CAP_ARM_TMM_CFG_PMU 4
+
+DECLARE_STATIC_KEY_FALSE(kvm_cvm_is_available);
+DECLARE_STATIC_KEY_FALSE(kvm_cvm_is_enable);
+
+struct kvm_cap_arm_tmm_config_item {
+ __u32 cfg;
+ union {
+ /* cfg == KVM_CAP_ARM_TMM_CFG_RPV */
+ struct {
+ __u8 rpv[KVM_CAP_ARM_TMM_RPV_SIZE];
+ };
+
+ /* cfg == KVM_CAP_ARM_TMM_CFG_HASH_ALGO */
+ struct {
+ __u32 hash_algo;
+ };
+
+ /* cfg == KVM_CAP_ARM_TMM_CFG_SVE */
+ struct {
+ __u32 sve_vq;
+ };
+
+ /* cfg == KVM_CAP_ARM_TMM_CFG_DBG */
+ struct {
+ __u32 num_brps;
+ __u32 num_wrps;
+ };
+
+ /* cfg == KVM_CAP_ARM_TMM_CFG_PMU */
+ struct {
+ __u32 num_pmu_cntrs;
+ };
+ /* Fix the size of the union */
+ __u8 reserved[256];
+ };
+};
+
+enum tmi_tmm_mem_type {
+ TMM_MEM_TYPE_RD,
+ TMM_MEM_TYPE_TEC,
+ TMM_MEM_TYPE_TTT,
+ TMM_MEM_TYPE_CVM_PA,
+};
+
+enum tmi_tmm_map_size {
+ TMM_MEM_MAP_SIZE_4K,
+ TMM_MEM_MAP_SIZE_2M,
+ TMM_MEM_MAP_SIZE_1G,
+ TMM_MEM_MAP_SIZE_MAX,
+};
+
+static inline bool tmm_is_addr_ttt_level_aligned(uint64_t addr, int level)
+{
+ uint64_t mask = (1UL << (12 + 9 * (3 - level))) - 1;
+ return (addr & mask) == 0;
+}
+
+u64 phys_to_cvm_phys(u64 phys);
+
+u64 tmi_version(void);
+u64 tmi_data_create(u64 data, u64 rd, u64 map_addr, u64 src, u64 level);
+u64 tmi_data_destroy(u64 rd, u64 map_addr, u64 level);
+u64 tmi_cvm_activate(u64 rd);
+u64 tmi_cvm_create(u64 rd, u64 params_ptr);
+u64 tmi_cvm_destroy(u64 rd);
+u64 tmi_tec_create(u64 tec, u64 rd, u64 mpidr, u64 params_ptr);
+u64 tmi_tec_destroy(u64 tec);
+u64 tmi_tec_enter(u64 tec, u64 run_ptr);
+u64 tmi_ttt_create(u64 ttt, u64 rd, u64 map_addr, u64 level);
+u64 tmi_ttt_destroy(u64 ttt, u64 rd, u64 map_addr, u64 level);
+u64 tmi_ttt_map_unprotected(u64 rd, u64 map_addr, u64 level, u64 ttte);
+u64 tmi_ttt_unmap_unprotected(u64 rd, u64 map_addr, u64 level, u64 ns);
+u64 tmi_ttt_unmap_protected(u64 rd, u64 map_addr, u64 level);
+u64 tmi_psci_complete(u64 calling_tec, u64 target_tec);
+u64 tmi_features(u64 index);
+u64 tmi_ttt_map_range(u64 rd, u64 map_addr, u64 size, u64 cur_node, u64 target_node);
+u64 tmi_ttt_unmap_range(u64 rd, u64 map_addr, u64 size, u64 node_id);
+
+u64 tmi_mem_alloc(u64 rd, u64 numa_id, enum tmi_tmm_mem_type tmm_mem_type,
+ enum tmi_tmm_map_size tmm_map_size);
+u64 tmi_mem_free(u64 pa, u64 numa_id, enum tmi_tmm_mem_type tmm_mem_type,
+ enum tmi_tmm_map_size tmm_map_size);
+
+void kvm_cvm_vcpu_put(struct kvm_vcpu *vcpu);
+int kvm_load_user_data(struct kvm *kvm, unsigned long arg);
+unsigned long cvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu,
+ unsigned long target_affinity, unsigned long lowest_affinity_level);
+int kvm_cvm_vcpu_set_events(struct kvm_vcpu *vcpu,
+ bool serror_pending, bool ext_dabt_pending);
+
+#endif
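
For reference, TMI_FID() simply packs the SMCCC fields defined above: fast-call
bit, calling-convention bit, the standard-calls OEN, and the function number,
so TMI_FID(SMC_64, TMI_FNUM_TEC_ENTER) works out to
(1 << 31) | (1 << 30) | (4 << 24) | 0x27C = 0xC400027C. A minimal standalone
sketch (not part of the patch; constants copied from this header) to
sanity-check the encoding:

/* Standalone sketch, not part of the patch: verify the TMI FID
 * encoding using the SMCCC field layout from kvm_tmi.h.
 */
#include <stdio.h>

#define FUNCID_TYPE_SHIFT 31
#define FUNCID_CC_SHIFT 30
#define FUNCID_OEN_SHIFT 24
#define OEN_STD_START 4UL
#define SMC_TYPE_FAST 1UL
#define SMC_64 1UL

#define TMI_FID(smc_cc, func_num) \
	((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) | \
	((smc_cc) << FUNCID_CC_SHIFT) | \
	(OEN_STD_START << FUNCID_OEN_SHIFT) | \
	(func_num))

int main(void)
{
	/* TMI_FNUM_TEC_ENTER is 0x27C, so this prints 0xc400027c */
	printf("TEC_ENTER FID: %#lx\n", TMI_FID(SMC_64, 0x27CUL));
	return 0;
}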
diff --git a/arch/arm64/include/asm/kvm_tmm.h b/arch/arm64/include/asm/kvm_tmm.h
new file mode 100644
index 000000000..41383494f
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_tmm.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+#ifndef __ASM_KVM_TMM_H
+#define __ASM_KVM_TMM_H
+
+#include <uapi/linux/kvm.h>
+
+enum cvm_state {
+ CVM_STATE_NONE,
+ CVM_STATE_NEW,
+ CVM_STATE_ACTIVE,
+ CVM_STATE_DYING
+};
+
+struct cvm {
+ enum cvm_state state;
+ u32 cvm_vmid;
+ u64 rd;
+ u64 loader_start;
+ u64 initrd_start;
+ u64 initrd_size;
+ u64 ram_size;
+ struct kvm_numa_info numa_info;
+ struct tmi_cvm_params *params;
+};
+
+/*
+ * struct cvm_tec - Additional per VCPU data for a CVM
+ */
+struct cvm_tec {
+ u64 tec;
+ bool tec_created;
+ void *tec_run;
+};
+
+int kvm_init_tmm(void);
+int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap);
+int kvm_init_cvm_vm(struct kvm *kvm);
+void kvm_destroy_cvm(struct kvm *kvm);
+int kvm_create_tec(struct kvm_vcpu *vcpu);
+void kvm_destroy_tec(struct kvm_vcpu *vcpu);
+int kvm_tec_enter(struct kvm_vcpu *vcpu);
+int handle_cvm_exit(struct kvm_vcpu *vcpu, int rec_run_status);
+int kvm_arm_create_cvm(struct kvm *kvm);
+void kvm_free_rd(struct kvm *kvm);
+int cvm_create_rd(struct kvm *kvm);
+int kvm_arm_cvm_first_run(struct kvm_vcpu *vcpu);
+int cvm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target);
+int kvm_arch_tec_init(struct kvm_vcpu *vcpu);
+
+void kvm_cvm_unmap_destroy_range(struct kvm *kvm);
+
+#define CVM_TTT_BLOCK_LEVEL 2
+#define CVM_TTT_MAX_LEVEL 3
+
+#define CVM_PAGE_SHIFT 12
+#define CVM_PAGE_SIZE BIT(CVM_PAGE_SHIFT)
+#define CVM_TTT_LEVEL_SHIFT(l) \
+ ((CVM_PAGE_SHIFT - 3) * (4 - (l)) + 3)
+#define CVM_L2_BLOCK_SIZE BIT(CVM_TTT_LEVEL_SHIFT(2))
+
+static inline unsigned long cvm_ttt_level_mapsize(int level)
+{
+ if (WARN_ON(level > CVM_TTT_BLOCK_LEVEL))
+ return CVM_PAGE_SIZE;
+
+ return (1UL << CVM_TTT_LEVEL_SHIFT(level));
+}
+
+#endif
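
The TTT level arithmetic above follows the usual 4KB-granule stage-2 layout:
CVM_TTT_LEVEL_SHIFT(l) = 9 * (4 - l) + 3, so level 3 maps 4KB pages and
level 2 (CVM_TTT_BLOCK_LEVEL) maps 2MB blocks. A quick standalone check of
the macro (not part of the patch):

/* Standalone sketch, not part of the patch: map sizes implied by
 * CVM_TTT_LEVEL_SHIFT() for each translation table level.
 */
#include <stdio.h>

#define CVM_PAGE_SHIFT 12
#define CVM_TTT_LEVEL_SHIFT(l) ((CVM_PAGE_SHIFT - 3) * (4 - (l)) + 3)

int main(void)
{
	int l;

	/* prints: level 1 -> 1073741824, level 2 -> 2097152, level 3 -> 4096 */
	for (l = 1; l <= 3; l++)
		printf("level %d -> %lu bytes\n", l, 1UL << CVM_TTT_LEVEL_SHIFT(l));
	return 0;
}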
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index d984a6041..7c24a4d33 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -46,6 +46,14 @@ menuconfig KVM
If unsure, say N.
+config CVM_HOST
+ bool "CVM_Enabled"
+ depends on KVM && ARM64
+ help
+ Support for running confidential VMs (cVMs) based on S-EL2.
+
+ If unsure, say N.
+
if KVM
source "virt/kvm/Kconfig"
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 395d65165..3b92eaa4f 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -24,5 +24,9 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
vgic/vgic-its.o vgic/shadow_dev.o vgic/vgic-debug.o
+kvm-$(CONFIG_CVM_HOST) += tmi.o
+kvm-$(CONFIG_CVM_HOST) += cvm.o
+kvm-$(CONFIG_CVM_HOST) += cvm_exit.o
+
kvm-$(CONFIG_KVM_ARM_PMU) += pmu-emul.o
obj-$(CONFIG_KVM_HISI_VIRT) += hisilicon/
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index f9d6a5cd4..a2443d8da 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -16,6 +16,10 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
+
#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
@@ -138,10 +142,79 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
}
}
+#ifdef CONFIG_CVM_HOST
+static bool cvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
+{
+ return timer_ctx &&
+ ((timer_get_ctl(timer_ctx) &
+ (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
+}
+
+void kvm_cvm_timers_update(struct kvm_vcpu *vcpu)
+{
+ int i;
+ u64 cval, now;
+ bool status, level;
+ struct arch_timer_context *timer;
+ struct arch_timer_cpu *arch_timer = &vcpu->arch.timer_cpu;
+
+ for (i = 0; i < NR_KVM_TIMERS; i++) {
+ timer = &arch_timer->timers[i];
+
+ if (!timer->loaded) {
+ if (!cvm_timer_irq_can_fire(timer))
+ continue;
+ cval = timer_get_cval(timer);
+ now = kvm_phys_timer_read() - timer_get_offset(timer);
+ level = (cval <= now);
+ kvm_timer_update_irq(vcpu, level, timer);
+ } else {
+ status = timer_get_ctl(timer) & ARCH_TIMER_CTRL_IT_STAT;
+ level = cvm_timer_irq_can_fire(timer) && status;
+ if (level != timer->irq.level)
+ kvm_timer_update_irq(vcpu, level, timer);
+ }
+ }
+}
+
+static void set_cvm_timers_loaded(struct kvm_vcpu *vcpu, bool loaded)
+{
+ int i;
+ struct arch_timer_cpu *arch_timer = &vcpu->arch.timer_cpu;
+
+ for (i = 0; i < NR_KVM_TIMERS; i++) {
+ struct arch_timer_context *timer = &arch_timer->timers[i];
+ timer->loaded = loaded;
+ }
+}
+
+static void kvm_timer_blocking(struct kvm_vcpu *vcpu);
+static void kvm_timer_unblocking(struct kvm_vcpu *vcpu);
+
+static inline void cvm_vcpu_load_timer_callback(struct kvm_vcpu *vcpu)
+{
+ kvm_cvm_timers_update(vcpu);
+ kvm_timer_unblocking(vcpu);
+ set_cvm_timers_loaded(vcpu, true);
+}
+
+static inline void cvm_vcpu_put_timer_callback(struct kvm_vcpu *vcpu)
+{
+ set_cvm_timers_loaded(vcpu, false);
+ if (rcuwait_active(kvm_arch_vcpu_get_wait(vcpu)))
+ kvm_timer_blocking(vcpu);
+}
+#endif
+
static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
{
struct kvm_vcpu *vcpu = ctxt->vcpu;
+#ifdef CONFIG_CVM_HOST
+ if (kvm_is_cvm(vcpu->kvm))
+ return;
+#endif
+
switch(arch_timer_ctx_index(ctxt)) {
case TIMER_VTIMER:
__vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
@@ -667,6 +740,13 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct timer_map map;
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ cvm_vcpu_load_timer_callback(vcpu);
+ return;
+ }
+#endif
+
if (unlikely(!timer->enabled))
return;
@@ -752,6 +832,13 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
struct timer_map map;
struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ cvm_vcpu_put_timer_callback(vcpu);
+ return;
+ }
+#endif
+
if (unlikely(!timer->enabled))
return;
@@ -898,7 +985,12 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
ptimer->vcpu = vcpu;
/* Synchronize cntvoff across all vtimers of a VM. */
- update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
+#ifdef CONFIG_CVM_HOST
+ if (kvm_is_cvm(vcpu->kvm))
+ update_vtimer_cntvoff(vcpu, 0);
+ else
+#endif
+ update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
timer_set_offset(ptimer, 0);
hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
@@ -1356,6 +1448,16 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
return -EINVAL;
}
+#ifdef CONFIG_CVM_HOST
+ /*
+ * We don't use mapped IRQs for CVM because the TMI doesn't allow
+ * us setting the LR.HW bit in the VGIC.
+ */
+ if (vcpu_is_tec(vcpu)) {
+ return 0;
+ }
+#endif
+
get_timer_map(vcpu, &map);
if (vtimer_is_irqbypass())
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 718f6060b..32974a10e 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -38,6 +38,9 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/sections.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
#include <kvm/arm_hypercalls.h>
#include <kvm/arm_pmu.h>
@@ -108,6 +111,12 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
r = 0;
kvm->arch.return_nisv_io_abort_to_user = true;
break;
+#ifdef CONFIG_CVM_HOST
+ case KVM_CAP_ARM_TMM:
+ if (static_branch_unlikely(&kvm_cvm_is_available))
+ r = kvm_cvm_enable_cap(kvm, cap);
+ break;
+#endif
default:
r = -EINVAL;
break;
@@ -149,13 +158,29 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
return ret;
#endif
+#ifdef CONFIG_CVM_HOST
+ if (kvm_arm_cvm_type(type)) {
+ ret = cvm_create_rd(kvm);
+ if (ret)
+ return ret;
+ }
+#endif
+
ret = kvm_arm_setup_stage2(kvm, type);
if (ret)
+#ifdef CONFIG_CVM_HOST
+ goto out_free_rd;
+#else
return ret;
+#endif
ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu);
if (ret)
+#ifdef CONFIG_CVM_HOST
+ goto out_free_rd;
+#else
return ret;
+#endif
ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
if (ret)
@@ -167,10 +192,21 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();
set_default_csv2(kvm);
+#ifdef CONFIG_CVM_HOST
+ if (kvm_arm_cvm_type(type)) {
+ ret = kvm_init_cvm_vm(kvm);
+ if (ret)
+ goto out_free_stage2_pgd;
+ }
+#endif
return ret;
out_free_stage2_pgd:
kvm_free_stage2_pgd(&kvm->arch.mmu);
+#ifdef CONFIG_CVM_HOST
+out_free_rd:
+ kvm_free_rd(kvm);
+#endif
return ret;
}
@@ -203,6 +239,10 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
}
}
atomic_set(&kvm->online_vcpus, 0);
+#ifdef CONFIG_CVM_HOST
+ if (kvm_is_cvm(kvm))
+ kvm_destroy_cvm(kvm);
+#endif
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -258,11 +298,21 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = 1;
break;
case KVM_CAP_STEAL_TIME:
+#ifdef CONFIG_CVM_HOST
+ if (kvm && kvm_is_cvm(kvm))
+ r = 0;
+ else
+#endif
r = kvm_arm_pvtime_supported();
break;
case KVM_CAP_ARM_VIRT_MSI_BYPASS:
r = sdev_enable;
break;
+#ifdef CONFIG_CVM_HOST
+ case KVM_CAP_ARM_TMM:
+ r = static_key_enabled(&kvm_cvm_is_available);
+ break;
+#endif
default:
r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
break;
@@ -358,6 +408,13 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
return err;
#endif
+#ifdef CONFIG_CVM_HOST
+ if (kvm_is_cvm(vcpu->kvm)) {
+ err = kvm_arch_tec_init(vcpu);
+ if (err)
+ return err;
+ }
+#endif
return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
}
@@ -444,8 +501,23 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu->cpu = cpu;
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ if (single_task_running())
+ vcpu_clear_wfx_traps(vcpu);
+ else
+ vcpu_set_wfx_traps(vcpu);
+ }
+#endif
kvm_vgic_load(vcpu);
kvm_timer_vcpu_load(vcpu);
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
+ kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
+ return;
+ }
+#endif
if (has_vhe())
kvm_vcpu_load_sysregs_vhe(vcpu);
kvm_arch_vcpu_load_fp(vcpu);
@@ -472,6 +544,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ kvm_cvm_vcpu_put(vcpu);
+ return;
+ }
+#endif
kvm_arch_vcpu_put_fp(vcpu);
if (has_vhe())
kvm_vcpu_put_sysregs_vhe(vcpu);
@@ -662,6 +740,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
* Tell the rest of the code that there are userspace irqchip
* VMs in the wild.
*/
+#ifdef CONFIG_CVM_HOST
+ if (!kvm_is_cvm(kvm))
+#endif
static_branch_inc(&userspace_irqchip_in_use);
}
@@ -830,7 +911,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
ret = kvm_vcpu_first_run_init(vcpu);
if (ret)
return ret;
-
+#ifdef CONFIG_CVM_HOST
+ if (kvm_is_cvm(vcpu->kvm)) {
+ ret = kvm_arm_cvm_first_run(vcpu);
+ if (ret)
+ return ret;
+ }
+#endif
if (run->exit_reason == KVM_EXIT_MMIO) {
ret = kvm_handle_mmio_return(vcpu);
if (ret)
@@ -905,8 +992,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
*/
trace_kvm_entry(vcpu->vcpu_id, *vcpu_pc(vcpu));
guest_enter_irqoff();
-
- ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ ret = kvm_tec_enter(vcpu);
+ else
+#endif
+ ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu->stat.exits++;
@@ -961,11 +1052,16 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
* guest time.
*/
guest_exit();
- trace_kvm_exit(vcpu->vcpu_id, ret, *vcpu_pc(vcpu));
-
- /* Exit types that need handling before we can be preempted */
- handle_exit_early(vcpu, ret);
+#ifdef CONFIG_CVM_HOST
+ if (!vcpu_is_tec(vcpu)) {
+#endif
+ trace_kvm_exit(vcpu->vcpu_id, ret, *vcpu_pc(vcpu));
+ /* Exit types that need handling before we can be preempted */
+ handle_exit_early(vcpu, ret);
+#ifdef CONFIG_CVM_HOST
+ }
+#endif
preempt_enable();
/*
@@ -986,8 +1082,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
vcpu->arch.target = -1;
ret = ARM_EXCEPTION_IL;
}
-
- ret = handle_exit(vcpu, ret);
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ ret = handle_cvm_exit(vcpu, ret);
+ else
+#endif
+ ret = handle_exit(vcpu, ret);
update_vcpu_stat_time(&vcpu->stat);
}
@@ -1419,6 +1519,11 @@ long kvm_arch_vm_ioctl(struct file *filp,
void __user *argp = (void __user *)arg;
switch (ioctl) {
+#ifdef CONFIG_CVM_HOST
+ case KVM_LOAD_USER_DATA: {
+ return kvm_load_user_data(kvm, arg);
+ }
+#endif
case KVM_CREATE_IRQCHIP: {
int ret;
if (!vgic_present)
@@ -1950,7 +2055,13 @@ int kvm_arch_init(void *opaque)
kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n");
return -ENODEV;
}
-
+#ifdef CONFIG_CVM_HOST
+ if (static_branch_unlikely(&kvm_cvm_is_enable) && in_hyp_mode) {
+ err = kvm_init_tmm();
+ if (err)
+ return err;
+ }
+#endif
if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
cpus_have_final_cap(ARM64_WORKAROUND_1508412))
kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
diff --git a/arch/arm64/kvm/cvm.c b/arch/arm64/kvm/cvm.c
new file mode 100644
index 000000000..11f82c07c
--- /dev/null
+++ b/arch/arm64/kvm/cvm.c
@@ -0,0 +1,824 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+#include <linux/kvm_host.h>
+#include <linux/kvm.h>
+#include <asm/kvm_tmi.h>
+#include <asm/kvm_pgtable.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_mmu.h>
+#include <asm/stage2_pgtable.h>
+#include <linux/arm-smccc.h>
+#include <kvm/arm_hypercalls.h>
+#include <kvm/arm_psci.h>
+
+/* Protects access to cvm_vmid_bitmap */
+static DEFINE_SPINLOCK(cvm_vmid_lock);
+static unsigned long *cvm_vmid_bitmap;
+DEFINE_STATIC_KEY_FALSE(kvm_cvm_is_available);
+DEFINE_STATIC_KEY_FALSE(kvm_cvm_is_enable);
+
+static int __init setup_cvm_host(char *str)
+{
+ int ret;
+ unsigned int val;
+
+ if (!str)
+ return 0;
+
+ ret = kstrtouint(str, 10, &val);
+ if (ret) {
+ pr_warn("Unable to parse cvm_guest.\n");
+ } else {
+ if (val)
+ static_branch_enable(&kvm_cvm_is_enable);
+ }
+ return ret;
+}
+early_param("cvm_host", setup_cvm_host);
+
+u64 cvm_phys_to_phys(u64 phys)
+{
+ return phys;
+}
+
+u64 phys_to_cvm_phys(u64 phys)
+{
+ return phys;
+}
+
+static int cvm_vmid_init(void)
+{
+ unsigned int vmid_count = 1 << kvm_get_vmid_bits();
+
+ cvm_vmid_bitmap = bitmap_zalloc(vmid_count, GFP_KERNEL);
+ if (!cvm_vmid_bitmap) {
+ kvm_err("%s: Couldn't allocate cvm vmid bitmap\n", __func__);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static unsigned long tmm_feat_reg0;
+
+static bool tmm_supports(unsigned long feature)
+{
+ return !!u64_get_bits(tmm_feat_reg0, feature);
+}
+
+bool kvm_cvm_supports_sve(void)
+{
+ return tmm_supports(TMI_FEATURE_REGISTER_0_SVE_EN);
+}
+
+bool kvm_cvm_supports_pmu(void)
+{
+ return tmm_supports(TMI_FEATURE_REGISTER_0_PMU_EN);
+}
+
+u32 kvm_cvm_ipa_limit(void)
+{
+ return u64_get_bits(tmm_feat_reg0, TMI_FEATURE_REGISTER_0_S2SZ);
+}
+
+u32 kvm_cvm_get_num_brps(void)
+{
+ return u64_get_bits(tmm_feat_reg0, TMI_FEATURE_REGISTER_0_NUM_BPS);
+}
+
+u32 kvm_cvm_get_num_wrps(void)
+{
+ return u64_get_bits(tmm_feat_reg0, TMI_FEATURE_REGISTER_0_NUM_WPS);
+}
+
+static int cvm_vmid_reserve(void)
+{
+ int ret;
+ unsigned int vmid_count = 1 << kvm_get_vmid_bits();
+
+ spin_lock(&cvm_vmid_lock);
+ ret = bitmap_find_free_region(cvm_vmid_bitmap, vmid_count, 0);
+ spin_unlock(&cvm_vmid_lock);
+
+ return ret;
+}
+
+static void cvm_vmid_release(unsigned int vmid)
+{
+ spin_lock(&cvm_vmid_lock);
+ bitmap_release_region(cvm_vmid_bitmap, vmid, 0);
+ spin_unlock(&cvm_vmid_lock);
+}
+
+static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
+{
+ u64 shift = ARM64_HW_PGTABLE_LEVEL_SHIFT(pgt->start_level - 1);
+ u64 mask = BIT(pgt->ia_bits) - 1;
+
+ return (addr & mask) >> shift;
+}
+
+static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
+{
+ struct kvm_pgtable pgt = {
+ .ia_bits = ia_bits,
+ .start_level = start_level,
+ };
+ return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
+}
+
+int kvm_arm_create_cvm(struct kvm *kvm)
+{
+ int ret;
+ struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
+ unsigned int pgd_sz;
+
+ if (!kvm_is_cvm(kvm) || kvm_cvm_state(kvm) != CVM_STATE_NONE) {
+ return 0;
+ }
+
+ ret = cvm_vmid_reserve();
+ if (ret < 0) {
+ return ret;
+ }
+ kvm->arch.cvm.cvm_vmid = ret;
+
+ pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level);
+
+ kvm->arch.cvm.params->ttt_base = phys_to_cvm_phys(kvm->arch.mmu.pgd_phys);
+ kvm->arch.cvm.params->measurement_algo = 0;
+ kvm->arch.cvm.params->ttt_level_start = kvm->arch.mmu.pgt->start_level;
+ kvm->arch.cvm.params->ttt_num_start = pgd_sz;
+ kvm->arch.cvm.params->s2sz = VTCR_EL2_IPA(kvm->arch.vtcr);
+ kvm->arch.cvm.params->vmid = kvm->arch.cvm.cvm_vmid;
+ kvm->arch.cvm.params->ns_vtcr = kvm->arch.vtcr;
+ kvm->arch.cvm.params->vttbr_el2 = kvm->arch.mmu.pgd_phys;
+ ret = tmi_cvm_create(kvm->arch.cvm.rd, __pa(kvm->arch.cvm.params));
+ if (!ret) {
+ kvm_info("KVM creates cVM: %d\n", kvm->arch.cvm.cvm_vmid);
+ }
+
+ WRITE_ONCE(kvm->arch.cvm.state, CVM_STATE_NEW);
+ kfree(kvm->arch.cvm.params);
+ kvm->arch.cvm.params = NULL;
+ return ret;
+}
+
+int cvm_create_rd(struct kvm *kvm)
+{
+ if (!static_key_enabled(&kvm_cvm_is_available))
+ return -EFAULT;
+
+ kvm->arch.cvm.rd = tmi_mem_alloc(kvm->arch.cvm.rd, NO_NUMA, TMM_MEM_TYPE_RD, TMM_MEM_MAP_SIZE_MAX);
+ if (!kvm->arch.cvm.rd) {
+ kvm_err("tmi_mem_alloc for cvm rd failed: %d\n", kvm->arch.cvm.cvm_vmid);
+ return -ENOMEM;
+ }
+ kvm->arch.is_cvm = true;
+ return 0;
+}
+
+void kvm_free_rd(struct kvm *kvm)
+{
+ int ret;
+
+ if (!kvm->arch.cvm.rd)
+ return;
+
+ ret = tmi_mem_free(kvm->arch.cvm.rd, NO_NUMA, TMM_MEM_TYPE_RD, TMM_MEM_MAP_SIZE_MAX);
+ if (ret)
+ kvm_err("tmi_mem_free for cvm rd failed: %d\n", kvm->arch.cvm.cvm_vmid);
+ else
+ kvm->arch.cvm.rd = 0;
+}
+
+void kvm_destroy_cvm(struct kvm *kvm)
+{
+ uint32_t cvm_vmid = kvm->arch.cvm.cvm_vmid;
+
+ if (kvm->arch.cvm.params) {
+ kfree(kvm->arch.cvm.params);
+ kvm->arch.cvm.params = NULL;
+ }
+
+ if (kvm_cvm_state(kvm) == CVM_STATE_NONE)
+ return;
+
+ cvm_vmid_release(cvm_vmid);
+
+ WRITE_ONCE(kvm->arch.cvm.state, CVM_STATE_DYING);
+
+ if (!tmi_cvm_destroy(kvm->arch.cvm.rd)) {
+ kvm_info("KVM has destroyed cVM: %d\n", kvm->arch.cvm.cvm_vmid);
+ }
+
+ kvm_free_rd(kvm);
+}
+
+static int kvm_get_host_numa_node_by_ipa(uint64_t ipa, struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct kvm_numa_info *numa_info = &vcpu->kvm->arch.cvm.numa_info;
+ for (i = 0; i < numa_info->numa_cnt && i < MAX_NUMA_NODE; i++) {
+ struct kvm_numa_node *numa_node = &numa_info->numa_nodes[i];
+ if (ipa >= numa_node->ipa_start && ipa < (numa_node->ipa_start + numa_node->ipa_size)) {
+ return numa_node->host_numa_node;
+ }
+ }
+ return NO_NUMA;
+}
+
+static int kvm_cvm_ttt_create(struct cvm *cvm,
+ unsigned long addr,
+ int level,
+ phys_addr_t phys)
+{
+ addr = ALIGN_DOWN(addr, cvm_ttt_level_mapsize(level - 1));
+ return tmi_ttt_create(phys, cvm->rd, addr, level);
+}
+
+int kvm_cvm_create_ttt_levels(struct kvm *kvm, struct cvm *cvm,
+ unsigned long ipa,
+ int level,
+ int max_level,
+ struct kvm_mmu_memory_cache *mc)
+{
+ if (WARN_ON(level == max_level))
+ return 0;
+
+ while (level++ < max_level) {
+ phys_addr_t ttt;
+ ttt = tmi_mem_alloc(cvm->rd, NO_NUMA, TMM_MEM_TYPE_TTT, TMM_MEM_MAP_SIZE_MAX);
+ if (ttt == 0)
+ return -ENOMEM;
+
+ if (kvm_cvm_ttt_create(cvm, ipa, level, ttt)) {
+ (void)tmi_mem_free(ttt, NO_NUMA, TMM_MEM_TYPE_TTT, TMM_MEM_MAP_SIZE_MAX);
+ return -ENXIO;
+ }
+ }
+
+ return 0;
+}
+
+static int kvm_cvm_create_protected_data_page(struct kvm *kvm, struct cvm *cvm, unsigned long ipa, int level,
+ struct page *src_page, phys_addr_t dst_phys)
+{
+ phys_addr_t src_phys;
+ int ret;
+
+ src_phys = page_to_phys(src_page);
+ ret = tmi_data_create(dst_phys, cvm->rd, ipa, src_phys, level);
+ if (TMI_RETURN_STATUS(ret) == TMI_ERROR_TTT_WALK) {
+ /* Create missing TTTs and retry */
+ int level_fault = TMI_RETURN_INDEX(ret);
+ ret = kvm_cvm_create_ttt_levels(kvm, cvm, ipa, level_fault,
+ level, NULL);
+ if (ret)
+ goto err;
+ ret = tmi_data_create(dst_phys, cvm->rd, ipa, src_phys, level);
+ }
+ WARN_ON(ret);
+
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ return -ENXIO;
+}
+
+static u64 cvm_granule_size(u32 level)
+{
+ return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
+}
+
+int kvm_cvm_populate_par_region(struct kvm *kvm,
+ phys_addr_t ipa_base,
+ phys_addr_t ipa_end,
+ phys_addr_t dst_phys)
+{
+ struct cvm *cvm = &kvm->arch.cvm;
+ struct kvm_memory_slot *memslot;
+ gfn_t base_gfn, end_gfn;
+ int idx;
+ phys_addr_t ipa;
+ int ret = 0;
+ int level = TMM_TTT_LEVEL_3;
+ unsigned long map_size = cvm_granule_size(level);
+
+ base_gfn = gpa_to_gfn(ipa_base);
+ end_gfn = gpa_to_gfn(ipa_end);
+
+ idx = srcu_read_lock(&kvm->srcu);
+ memslot = gfn_to_memslot(kvm, base_gfn);
+ if (!memslot) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* We require the region to be contained within a single memslot */
+ if (memslot->base_gfn + memslot->npages < end_gfn) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mmap_read_lock(current->mm);
+
+ ipa = ipa_base;
+ while (ipa < ipa_end) {
+ struct page *page;
+ kvm_pfn_t pfn;
+
+ /*
+ * FIXME: This causes over mapping, but there's no good
+ * solution here with the ABI as it stands
+ */
+ ipa = ALIGN_DOWN(ipa, map_size);
+
+ pfn = gfn_to_pfn_memslot(memslot, gpa_to_gfn(ipa));
+
+ if (is_error_pfn(pfn)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ page = pfn_to_page(pfn);
+
+ ret = kvm_cvm_create_protected_data_page(kvm, cvm, ipa, level, page, dst_phys);
+ if (ret)
+ goto err_release_pfn;
+
+ ipa += map_size;
+ dst_phys += map_size;
+ kvm_release_pfn_dirty(pfn);
+err_release_pfn:
+ if (ret) {
+ kvm_release_pfn_clean(pfn);
+ break;
+ }
+ }
+
+ mmap_read_unlock(current->mm);
+out:
+ srcu_read_unlock(&kvm->srcu, idx);
+ return ret;
+}
+
+static int kvm_sel2_map_protected_ipa(struct kvm_vcpu *vcpu)
+{
+ int ret = 0;
+ gpa_t gpa, gpa_data_end, gpa_end, data_size;
+ u64 map_size, dst_phys;
+ u64 l2_granule = cvm_granule_size(2); /* 2MB */
+ u64 numa_id;
+ int cur_numa_id;
+
+ /* Align the addresses below to 2MB */
+ gpa = vcpu->kvm->arch.cvm.loader_start;
+ gpa_end = vcpu->kvm->arch.cvm.loader_start + vcpu->kvm->arch.cvm.ram_size;
+ data_size = vcpu->kvm->arch.cvm.initrd_start - vcpu->kvm->arch.cvm.loader_start +
+ vcpu->kvm->arch.cvm.initrd_size;
+ data_size = round_up(data_size, l2_granule);
+ gpa_data_end = vcpu->kvm->arch.cvm.loader_start + data_size + l2_granule;
+ gpa = round_down(gpa, l2_granule);
+ gpa_end = round_up(gpa_end, l2_granule);
+ gpa_data_end = round_up(gpa_data_end, l2_granule);
+
+ /* get numa_id */
+ numa_id = kvm_get_host_numa_node_by_ipa(gpa, vcpu);
+ map_size = l2_granule;
+ do {
+ dst_phys = tmi_mem_alloc(vcpu->kvm->arch.cvm.rd, numa_id, TMM_MEM_TYPE_CVM_PA, map_size);
+ if (!dst_phys) {
+ ret = -ENOMEM;
+ kvm_err("[%s] call tmi_mem_alloc failed.\n", __func__);
+ goto out;
+ }
+
+ /* Call tmi_data_create to copy the kernel data and to map all GPAs
+ * needed for system boot; copying only data_size is not enough to
+ * boot the kernel, so copy and map another 2MB here. */
+ ret = kvm_cvm_populate_par_region(vcpu->kvm, gpa, gpa + map_size, dst_phys);
+ if (ret) {
+ ret = -EFAULT;
+ kvm_err("kvm_cvm_populate_par_region fail:%d.\n", ret);
+ goto out;
+ }
+ gpa += map_size;
+ dst_phys += map_size;
+ } while (gpa < gpa_data_end);
+
+ cur_numa_id = numa_node_id();
+ if (cur_numa_id < 0) {
+ ret = -EFAULT;
+ kvm_err("get current numa node fail\n");
+ goto out;
+ }
+
+ /* Map the gpa range to secure memory without copying data from the host.
+ * The mapped pages will be freed when the cVM is destroyed. */
+ ret = tmi_ttt_map_range(vcpu->kvm->arch.cvm.rd, gpa_data_end,
+ gpa_end - gpa_data_end, cur_numa_id, numa_id);
+ if (ret)
+ kvm_err("tmi_ttt_map_range fail:%d.\n", ret);
+out:
+ return ret;
+}
+
+int kvm_create_tec(struct kvm_vcpu *vcpu)
+{
+ int ret;
+ int i;
+ struct tmi_tec_params *params_ptr;
+ struct user_pt_regs *vcpu_regs = vcpu_gp_regs(vcpu);
+ uint64_t mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+
+ params_ptr = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
+ if (!params_ptr) {
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < TEC_CREATE_NR_GPRS; ++i) {
+ params_ptr->gprs[i] = vcpu_regs->regs[i];
+ }
+
+ params_ptr->pc = vcpu_regs->pc;
+
+ if (vcpu->vcpu_id == 0) {
+ params_ptr->flags = TMI_RUNNABLE;
+ } else {
+ params_ptr->flags = TMI_NOT_RUNNABLE;
+ }
+ params_ptr->ram_size = vcpu->kvm->arch.cvm.ram_size;
+ ret = tmi_tec_create(vcpu->arch.tec.tec, vcpu->kvm->arch.cvm.rd, mpidr, __pa(params_ptr));
+
+ kfree(params_ptr);
+
+ return ret;
+}
+
+static int kvm_create_all_tecs(struct kvm *kvm)
+{
+ int ret = 0;
+ struct kvm_vcpu *vcpu;
+ unsigned long i;
+
+ if (READ_ONCE(kvm->arch.cvm.state) == CVM_STATE_ACTIVE) {
+ return -1;
+ }
+
+ mutex_lock(&kvm->lock);
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!vcpu->arch.tec.tec_created) {
+ ret = kvm_create_tec(vcpu);
+ if (ret) {
+ mutex_unlock(&kvm->lock);
+ return ret;
+ }
+ vcpu->arch.tec.tec_created = true;
+ }
+ }
+ mutex_unlock(&kvm->lock);
+ return ret;
+}
+
+static int config_cvm_sve(struct kvm *kvm, struct kvm_cap_arm_tmm_config_item *cfg)
+{
+ tmi_cvm_params_t *params = kvm->arch.cvm.params;
+
+ int max_sve_vq = u64_get_bits(tmm_feat_reg0,
+ TMI_FEATURE_REGISTER_0_SVE_VL);
+
+ if (!kvm_cvm_supports_sve())
+ return -EINVAL;
+
+ if (cfg->sve_vq > max_sve_vq)
+ return -EINVAL;
+
+ params->sve_vl = cfg->sve_vq;
+ params->flags |= TMI_CVM_PARAM_FLAG_SVE;
+
+ return 0;
+}
+
+static int config_cvm_pmu(struct kvm *kvm, struct kvm_cap_arm_tmm_config_item *cfg)
+{
+ tmi_cvm_params_t *params = kvm->arch.cvm.params;
+
+ int max_pmu_num_ctrs = u64_get_bits(tmm_feat_reg0,
+ TMI_FEATURE_REGISTER_0_PMU_NUM_CTRS);
+
+ if (!kvm_cvm_supports_pmu())
+ return -EINVAL;
+
+ if (cfg->num_pmu_cntrs > max_pmu_num_ctrs)
+ return -EINVAL;
+
+ params->pmu_num_cnts = cfg->num_pmu_cntrs;
+ params->flags |= TMI_CVM_PARAM_FLAG_PMU;
+
+ return 0;
+}
+
+static int kvm_tmm_config_cvm(struct kvm *kvm, struct kvm_enable_cap *cap)
+{
+ struct kvm_cap_arm_tmm_config_item cfg;
+ int r = 0;
+
+ if (kvm_cvm_state(kvm) != CVM_STATE_NONE)
+ return -EBUSY;
+
+ if (copy_from_user(&cfg, (void __user *)cap->args[1], sizeof(cfg))) {
+ return -EFAULT;
+ }
+
+ switch (cfg.cfg) {
+ case KVM_CAP_ARM_TMM_CFG_SVE:
+ r = config_cvm_sve(kvm, &cfg);
+ break;
+ case KVM_CAP_ARM_TMM_CFG_PMU:
+ r = config_cvm_pmu(kvm, &cfg);
+ break;
+ default:
+ r = -EINVAL;
+ }
+
+ return r;
+}
+
+int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
+{
+ int r = 0;
+
+ mutex_lock(&kvm->lock);
+ switch (cap->args[0]) {
+ case KVM_CAP_ARM_TMM_CONFIG_CVM_HOST:
+ r = kvm_tmm_config_cvm(kvm, cap);
+ break;
+ case KVM_CAP_ARM_TMM_CREATE_CVM:
+ r = kvm_arm_create_cvm(kvm);
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+ mutex_unlock(&kvm->lock);
+
+ return r;
+}
+
+void kvm_destroy_tec(struct kvm_vcpu *vcpu)
+{
+ int ret = 0;
+ if (!vcpu_is_tec(vcpu)) {
+ return;
+ }
+
+ if (tmi_tec_destroy(vcpu->arch.tec.tec) != 0) {
+ kvm_err("%s vcpu id : %d failed!\n", __func__, vcpu->vcpu_id);
+ }
+
+ ret = tmi_mem_free(vcpu->arch.tec.tec, NO_NUMA, TMM_MEM_TYPE_TEC, TMM_MEM_MAP_SIZE_MAX);
+ if (ret != 0) {
+ kvm_err("tmi_mem_free for cvm tec failed\n");
+ }
+ vcpu->arch.tec.tec = 0;
+ kfree(vcpu->arch.tec.tec_run);
+}
+
+static int tmi_check_version(void)
+{
+ uint64_t res;
+ int version_major;
+ int version_minor;
+
+ res = tmi_version();
+ if (res == SMCCC_RET_NOT_SUPPORTED) {
+ return -ENXIO;
+ }
+
+ version_major = TMI_ABI_VERSION_GET_MAJOR(res);
+ version_minor = TMI_ABI_VERSION_GET_MINOR(res);
+
+ if (version_major != TMI_ABI_VERSION_MAJOR) {
+ kvm_err("Unsupported TMI_ABI (version %d %d)\n", version_major,
+ version_minor);
+ return -ENXIO;
+ }
+
+ kvm_info("TMI ABI version %d,%d\n", version_major, version_minor);
+ return 0;
+}
+
+static int kvm_kick_boot_vcpu(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ unsigned long i;
+
+ if (READ_ONCE(kvm->arch.cvm.state) == CVM_STATE_ACTIVE) {
+ return 0;
+ }
+
+ mutex_lock(&kvm->lock);
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (i == 0) {
+ kvm_vcpu_kick(vcpu);
+ }
+ }
+ mutex_unlock(&kvm->lock);
+ return 0;
+}
+
+int kvm_arm_cvm_first_run(struct kvm_vcpu *vcpu)
+{
+ int ret = 0;
+
+ if (READ_ONCE(vcpu->kvm->arch.cvm.state) == CVM_STATE_ACTIVE) {
+ return ret;
+ }
+
+ if (vcpu->vcpu_id == 0) {
+ ret = kvm_create_all_tecs(vcpu->kvm);
+ if (ret != 0) {
+ return ret;
+ }
+ } else {
+ kvm_kick_boot_vcpu(vcpu->kvm);
+ }
+
+ mutex_lock(&vcpu->kvm->lock);
+
+ if (vcpu->vcpu_id == 0) {
+ ret = kvm_sel2_map_protected_ipa(vcpu);
+ if (ret) {
+ kvm_err("Map protected ipa failed!\n");
+ goto unlock_exit;
+ }
+ ret = tmi_cvm_activate(vcpu->kvm->arch.cvm.rd);
+ if (ret) {
+ kvm_err("tmi_cvm_activate failed!\n");
+ goto unlock_exit;
+ }
+
+ WRITE_ONCE(vcpu->kvm->arch.cvm.state, CVM_STATE_ACTIVE);
+ kvm_info("cVM%d is activated!\n", vcpu->kvm->arch.cvm.cvm_vmid);
+ }
+unlock_exit:
+ mutex_unlock(&vcpu->kvm->lock);
+
+ return ret;
+}
+
+int kvm_tec_enter(struct kvm_vcpu *vcpu)
+{
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+ if (READ_ONCE(vcpu->kvm->arch.cvm.state) != CVM_STATE_ACTIVE) {
+ return -EINVAL;
+ }
+
+ /* set/clear TWI TWE flags */
+ if (vcpu->arch.hcr_el2 & HCR_TWI) {
+ run->tec_entry.flags |= TEC_ENTRY_FLAG_TRAP_WFI;
+ } else {
+ run->tec_entry.flags &= ~TEC_ENTRY_FLAG_TRAP_WFI;
+ }
+
+ if (vcpu->arch.hcr_el2 & HCR_TWE) {
+ run->tec_entry.flags |= TEC_ENTRY_FLAG_TRAP_WFE;
+ } else {
+ run->tec_entry.flags &= ~TEC_ENTRY_FLAG_TRAP_WFE;
+ }
+
+ return tmi_tec_enter(vcpu->arch.tec.tec, __pa(run));
+}
+
+int cvm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target)
+{
+ int ret;
+ ret = tmi_psci_complete(calling->arch.tec.tec, target->arch.tec.tec);
+ if (ret) {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+#define SIMD_PAGE_SIZE (3 * PAGE_SIZE)
+int kvm_arch_tec_init(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.tec.tec_run = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
+ if (!vcpu->arch.tec.tec_run) {
+ return -ENOMEM;
+ }
+
+ vcpu->arch.tec.tec = tmi_mem_alloc(vcpu->kvm->arch.cvm.rd, NO_NUMA, TMM_MEM_TYPE_TEC, TMM_MEM_MAP_SIZE_MAX);
+ if (vcpu->arch.tec.tec == 0) {
+ kvm_err("tmi_mem_alloc for TEC failed, vcpu %d\n", vcpu->vcpu_id);
+ kfree(vcpu->arch.tec.tec_run);
+ return -ENOMEM;
+ }
+ kvm_info("KVM inits cVM VCPU:%d\n", vcpu->vcpu_id);
+
+ return 0;
+}
+
+int kvm_init_tmm(void)
+{
+ int ret;
+
+ if (PAGE_SIZE != SZ_4K) {
+ return 0;
+ }
+
+ if (tmi_check_version()) {
+ return 0;
+ }
+
+ ret = cvm_vmid_init();
+ if (ret) {
+ return ret;
+ }
+
+ tmm_feat_reg0 = tmi_features(0);
+ kvm_info("TMM feature0: 0x%lx\n", tmm_feat_reg0);
+
+ static_branch_enable(&kvm_cvm_is_available);
+
+ return 0;
+}
+
+int kvm_init_cvm_vm(struct kvm *kvm)
+{
+ struct tmi_cvm_params *params;
+
+ params = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
+ if (!params) {
+ return -ENOMEM;
+ }
+
+ kvm->arch.cvm.params = params;
+
+ return 0;
+}
+
+int kvm_load_user_data(struct kvm *kvm, unsigned long arg)
+{
+ struct kvm_user_data user_data;
+ void __user *argp = (void __user *)arg;
+
+ if (!kvm_is_cvm(kvm))
+ return -EFAULT;
+
+ if (copy_from_user(&user_data, argp, sizeof(user_data)))
+ return -EFAULT;
+
+ kvm->arch.cvm.loader_start = user_data.loader_start;
+ kvm->arch.cvm.initrd_start = user_data.initrd_start;
+ kvm->arch.cvm.initrd_size = user_data.initrd_size;
+ kvm->arch.cvm.ram_size = user_data.ram_size;
+ memcpy(&kvm->arch.cvm.numa_info, &user_data.numa_info, sizeof(struct kvm_numa_info));
+
+ return 0;
+}
+
+void kvm_cvm_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ kvm_timer_vcpu_put(vcpu);
+ kvm_vgic_put(vcpu);
+ vcpu->cpu = -1;
+}
+unsigned long cvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu,
+ unsigned long target_affinity, unsigned long lowest_affinity_level)
+{
+ struct kvm_vcpu *target_vcpu;
+
+ if (lowest_affinity_level != 0)
+ return PSCI_RET_INVALID_PARAMS;
+
+ target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, target_affinity);
+ if (!target_vcpu)
+ return PSCI_RET_INVALID_PARAMS;
+
+ cvm_psci_complete(vcpu, target_vcpu);
+ return PSCI_RET_SUCCESS;
+}
+
+int kvm_cvm_vcpu_set_events(struct kvm_vcpu *vcpu,
+ bool serror_pending, bool ext_dabt_pending)
+{
+ if (serror_pending)
+ return -EINVAL;
+
+ if (ext_dabt_pending) {
+ if (!(((struct tmi_tec_run *)vcpu->arch.tec.tec_run)->tec_entry.flags &
+ TEC_ENTRY_FLAG_EMUL_MMIO))
+ return -EINVAL;
+
+ ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)->tec_entry.flags
+ &= ~TEC_ENTRY_FLAG_EMUL_MMIO;
+ ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)->tec_entry.flags
+ |= TEC_ENTRY_FLAG_INJECT_SEA;
+ }
+ return 0;
+}
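
For context, kvm_cvm_enable_cap() above expects userspace to drive
configuration and creation through KVM_ENABLE_CAP on the VM fd, with one of
the KVM_CAP_ARM_TMM_* sub-commands in args[0]. A hypothetical userspace
sketch follows; KVM_CAP_ARM_TMM and struct kvm_cap_arm_tmm_config_item are
assumed to come from the uapi/linux/kvm.h change in this series, and error
handling is elided:

/* Hypothetical userspace sketch, not part of the patch: configure the
 * PMU for a cVM, then create it. Assumes KVM_CAP_ARM_TMM and
 * struct kvm_cap_arm_tmm_config_item are exported by the uapi change
 * in this series.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int cvm_configure_and_create(int vm_fd)
{
	struct kvm_cap_arm_tmm_config_item cfg;
	struct kvm_enable_cap cap;

	memset(&cfg, 0, sizeof(cfg));
	cfg.cfg = KVM_CAP_ARM_TMM_CFG_PMU;
	cfg.num_pmu_cntrs = 4;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_ARM_TMM;
	cap.args[0] = KVM_CAP_ARM_TMM_CONFIG_CVM_HOST;
	cap.args[1] = (__u64)(unsigned long)&cfg;
	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
		return -1;

	cap.args[0] = KVM_CAP_ARM_TMM_CREATE_CVM;
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}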
diff --git a/arch/arm64/kvm/cvm_exit.c b/arch/arm64/kvm/cvm_exit.c
new file mode 100644
index 000000000..ba07a23be
--- /dev/null
+++ b/arch/arm64/kvm/cvm_exit.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+#include <linux/kvm_host.h>
+#include <kvm/arm_hypercalls.h>
+#include <kvm/arm_psci.h>
+
+#include <asm/kvm_tmi.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_coproc.h>
+
+typedef int (*exit_handler_fn)(struct kvm_vcpu *vcpu);
+
+static void update_arch_timer_irq_lines(struct kvm_vcpu *vcpu, bool unmask_ctl)
+{
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+ __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = run->tec_exit.cntv_ctl;
+ __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = run->tec_exit.cntv_cval;
+ __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = run->tec_exit.cntp_ctl;
+ __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = run->tec_exit.cntp_cval;
+
+ /* Because the timer mask is set by the TMM, we don't know the
+ * true intent of the guest. Here, we assume the mask is always
+ * cleared during WFI.
+ */
+ if (unmask_ctl) {
+ __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) &= ~ARCH_TIMER_CTRL_IT_MASK;
+ __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) &= ~ARCH_TIMER_CTRL_IT_MASK;
+ }
+
+ kvm_cvm_timers_update(vcpu);
+}
+
+static int tec_exit_reason_notimpl(struct kvm_vcpu *vcpu)
+{
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+ pr_err("[vcpu %d] Unhandled exit reason from cvm (ESR: %#llx)\n",
+ vcpu->vcpu_id, run->tec_exit.esr);
+ return -ENXIO;
+}
+
+/*
+ * The logic is the same as kvm_handle_wfx, except for the tracing
+ * and the PC update. The kvm_handle_wfx flow is duplicated here
+ * to avoid changing that function.
+ */
+static int tec_exit_wfx(struct kvm_vcpu *vcpu)
+{
+ u64 esr = kvm_vcpu_get_esr(vcpu);
+
+ if (esr & ESR_ELx_WFx_ISS_WFE) {
+ vcpu->stat.wfe_exit_stat++;
+ } else {
+ vcpu->stat.wfi_exit_stat++;
+ }
+
+ if (esr & ESR_ELx_WFx_ISS_WFxT) {
+ if (esr & ESR_ELx_WFx_ISS_RV) {
+ u64 val, now;
+
+ now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT);
+ val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
+
+ if (now >= val)
+ goto out;
+ } else {
+ /* Treat WFxT as WFx if RN is invalid */
+ esr &= ~ESR_ELx_WFx_ISS_WFxT;
+ }
+ }
+
+ if (esr & ESR_ELx_WFx_ISS_WFE) {
+ kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
+ } else {
+ vcpu->arch.pvsched.pv_unhalted = false;
+ if (esr & ESR_ELx_WFx_ISS_WFxT)
+ vcpu->arch.flags |= KVM_ARM64_WFIT;
+ kvm_vcpu_block(vcpu);
+ vcpu->arch.flags &= ~KVM_ARM64_WFIT;
+ kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+ }
+
+out:
+ return 1;
+}
+
+static int tec_exit_sys_reg(struct kvm_vcpu *vcpu)
+{
+ int ret;
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+ unsigned long esr = kvm_vcpu_get_esr(vcpu);
+ int rt = kvm_vcpu_sys_get_rt(vcpu);
+ bool is_write = !(esr & 1);
+
+ if (is_write) {
+ vcpu_set_reg(vcpu, rt, run->tec_exit.gprs[0]);
+ }
+
+ ret = kvm_handle_sys_reg(vcpu);
+
+ if (ret >= 0 && !is_write) {
+ run->tec_entry.gprs[0] = vcpu_get_reg(vcpu, rt);
+ }
+ return ret;
+}
+
+static int tec_exit_sync_dabt(struct kvm_vcpu *vcpu)
+{
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+ if (kvm_vcpu_dabt_iswrite(vcpu) && kvm_vcpu_dabt_isvalid(vcpu)) {
+ vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu),
+ run->tec_exit.gprs[0]);
+ }
+ return kvm_handle_guest_abort(vcpu);
+}
+
+static int tec_exit_sync_iabt(struct kvm_vcpu *vcpu)
+{
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+ pr_err("[vcpu %d] Unhandled instruction abort (ESR: %#llx).\n",
+ vcpu->vcpu_id, run->tec_exit.esr);
+
+ return -ENXIO;
+}
+
+static exit_handler_fn tec_exit_handlers[] = {
+ [0 ... ESR_ELx_EC_MAX] = tec_exit_reason_notimpl,
+ [ESR_ELx_EC_WFx] = tec_exit_wfx,
+ [ESR_ELx_EC_SYS64] = tec_exit_sys_reg,
+ [ESR_ELx_EC_DABT_LOW] = tec_exit_sync_dabt,
+ [ESR_ELx_EC_IABT_LOW] = tec_exit_sync_iabt
+};
+
+static int tec_exit_psci(struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+ for (i = 0; i < TEC_EXIT_NR_GPRS; ++i) {
+ vcpu_set_reg(vcpu, i, run->tec_exit.gprs[i]);
+ }
+
+ return kvm_psci_call(vcpu);
+}
+
+static int tec_exit_host_call(struct kvm_vcpu *vcpu)
+{
+ int ret, i;
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+ vcpu->stat.hvc_exit_stat++;
+
+ for (i = 0; i < TEC_EXIT_NR_GPRS; ++i) {
+ vcpu_set_reg(vcpu, i, run->tec_exit.gprs[i]);
+ }
+
+ ret = kvm_hvc_call_handler(vcpu);
+
+ if (ret < 0) {
+ vcpu_set_reg(vcpu, 0, ~0UL);
+ ret = 1;
+ }
+ for (i = 0; i < TEC_EXIT_NR_GPRS; ++i) {
+ run->tec_entry.gprs[i] = vcpu_get_reg(vcpu, i);
+ }
+
+ return ret;
+}
+
+/*
+ * Return > 0 to return to the guest, < 0 on error, and 0 (with
+ * exit_reason set) on a proper exit to userspace.
+ */
+
+int handle_cvm_exit(struct kvm_vcpu *vcpu, int tec_run_ret)
+{
+ unsigned long status;
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+ u8 esr_ec = ESR_ELx_EC(run->tec_exit.esr);
+ bool is_wfx;
+
+ status = TMI_RETURN_STATUS(tec_run_ret);
+
+ if (status == TMI_ERROR_CVM_POWEROFF) {
+ vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+ vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SHUTDOWN;
+ return 0;
+ }
+
+ if (status == TMI_ERROR_CVM_STATE) {
+ vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
+ return 0;
+ }
+
+ if (tec_run_ret) {
+ return -ENXIO;
+ }
+
+ vcpu->arch.fault.esr_el2 = run->tec_exit.esr;
+ vcpu->arch.fault.far_el2 = run->tec_exit.far;
+ vcpu->arch.fault.hpfar_el2 = run->tec_exit.hpfar;
+
+ is_wfx = (run->tec_exit.exit_reason == TMI_EXIT_SYNC) && (esr_ec == ESR_ELx_EC_WFx);
+ update_arch_timer_irq_lines(vcpu, is_wfx);
+
+ run->tec_entry.flags = 0;
+
+ switch (run->tec_exit.exit_reason) {
+ case TMI_EXIT_FIQ:
+ case TMI_EXIT_IRQ:
+ return 1;
+ case TMI_EXIT_PSCI:
+ return tec_exit_psci(vcpu);
+ case TMI_EXIT_SYNC:
+ return tec_exit_handlers[esr_ec](vcpu);
+ case TMI_EXIT_HOST_CALL:
+ return tec_exit_host_call(vcpu);
+ }
+
+ kvm_pr_unimpl("Unsupported exit reason : 0x%llx\n",
+ run->tec_exit.exit_reason);
+ return 0;
+}
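
The sync-exit dispatch above relies on a GNU C designated-range initializer
so that every exception class defaults to tec_exit_reason_notimpl and only
the handled ECs are overridden. A minimal standalone sketch of the same
pattern (not part of the patch; EC values are illustrative):

/* Standalone sketch, not part of the patch: the table-dispatch
 * pattern used by tec_exit_handlers[], with a [0 ... MAX] range
 * initializer providing a default handler.
 */
#include <stdio.h>

#define EC_MAX 0x3f
#define EC_WFX 0x01 /* matches ESR_ELx_EC_WFx */

typedef int (*handler_fn)(unsigned int ec);

static int notimpl(unsigned int ec)
{
	printf("unhandled EC %#x\n", ec);
	return -1;
}

static int handle_wfx(unsigned int ec)
{
	printf("WFx exit\n");
	return 1; /* > 0: return to guest */
}

static handler_fn handlers[EC_MAX + 1] = {
	[0 ... EC_MAX] = notimpl,
	[EC_WFX] = handle_wfx,
};

int main(void)
{
	return handlers[EC_WFX](EC_WFX) > 0 ? 0 : 1;
}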
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index f9c3dbc99..ecdd35527 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -27,6 +27,10 @@
#include <asm/kvm_coproc.h>
#include <asm/sigcontext.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
+
#include "trace.h"
struct kvm_stats_debugfs_item debugfs_entries[] = {
@@ -818,6 +822,10 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
bool has_esr = events->exception.serror_has_esr;
bool ext_dabt_pending = events->exception.ext_dabt_pending;
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ return kvm_cvm_vcpu_set_events(vcpu, serror_pending, ext_dabt_pending);
+#endif
if (serror_pending && has_esr) {
if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
return -EINVAL;
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 452f4cacd..54d541767 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -229,6 +229,25 @@ void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
}
}
+#ifdef CONFIG_CVM_HOST
+void __vgic_v3_restore_tec_state(struct vgic_v3_cpu_if *cpu_if,
+ u64 *entry_hcr,
+ u64 *entry_lrs)
+{
+ u64 used_lrs = cpu_if->used_lrs;
+ int i;
+
+ *entry_hcr = cpu_if->vgic_hcr;
+ for (i = 0; i < kvm_vgic_global_state.nr_lr; ++i) {
+ if (i < used_lrs) {
+ entry_lrs[i] = cpu_if->vgic_lr[i];
+ } else {
+ entry_lrs[i] = 0;
+ }
+ }
+}
+#endif
+
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if)
{
u64 used_lrs = cpu_if->used_lrs;
diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c
index 6a2826f1b..6fc49784b 100644
--- a/arch/arm64/kvm/mmio.c
+++ b/arch/arm64/kvm/mmio.c
@@ -8,6 +8,10 @@
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
+
#include "trace.h"
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
@@ -109,6 +113,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
&data);
data = vcpu_data_host_to_guest(vcpu, data, len);
vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)->
+ tec_entry.gprs[0] = data;
+ }
+#endif
}
/*
@@ -177,7 +187,12 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
run->mmio.phys_addr = fault_ipa;
run->mmio.len = len;
vcpu->mmio_needed = 1;
-
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)->tec_entry.flags |=
+ TEC_ENTRY_FLAG_EMUL_MMIO;
+ }
+#endif
if (!ret) {
/* We handled the access successfully in the kernel. */
if (!is_write)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 6fa92a143..30426f6ad 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -474,6 +474,7 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
spin_lock(&kvm->mmu_lock);
pgt = mmu->pgt;
+
if (pgt) {
mmu->pgd_phys = 0;
mmu->pgt = NULL;
@@ -790,6 +791,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level);
write_fault = kvm_is_write_fault(vcpu);
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ write_fault = true;
+ prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;
+ }
+#endif
exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
VM_BUG_ON(write_fault && exec_fault);
vcpu->stat.mabt_exit_stat++;
diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
index 32bb26be8..0160ee8d6 100644
--- a/arch/arm64/kvm/psci.c
+++ b/arch/arm64/kvm/psci.c
@@ -16,6 +16,9 @@
#include <kvm/arm_psci.h>
#include <kvm/arm_hypercalls.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
/*
* This is an implementation of the Power State Coordination Interface
* as described in ARM document number ARM DEN 0022A.
@@ -78,6 +81,10 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
*/
if (!vcpu)
return PSCI_RET_INVALID_PARAMS;
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ cvm_psci_complete(source_vcpu, vcpu);
+#endif
if (!vcpu->arch.power_off) {
if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
return PSCI_RET_ALREADY_ON;
@@ -133,7 +140,10 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
/* Ignore other bits of target affinity */
target_affinity &= target_affinity_mask;
-
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ return cvm_psci_vcpu_affinity_info(vcpu, target_affinity, lowest_affinity_level);
+#endif
/*
* If one or more VCPU matching target affinity are running
* then ON else OFF
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 51f4c5e85..bb177d58c 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -30,6 +30,9 @@
#include <asm/kvm_mmu.h>
#include <asm/virt.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
/* Maximum phys_shift supported for any VM on this host */
static u32 kvm_ipa_limit;
@@ -199,6 +202,9 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
kfree(vcpu->arch.sve_state);
+#ifdef CONFIG_CVM_HOST
+ kvm_destroy_tec(vcpu);
+#endif
}
static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
@@ -433,7 +439,11 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
u32 parange, phys_shift;
u8 lvls, pbha = 0xf;
+#ifdef CONFIG_CVM_HOST
+ if ((type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK) && (!kvm_is_cvm(kvm)))
+#else
if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
+#endif
return -EINVAL;
phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
diff --git a/arch/arm64/kvm/tmi.c b/arch/arm64/kvm/tmi.c
new file mode 100644
index 000000000..6eb5dbd97
--- /dev/null
+++ b/arch/arm64/kvm/tmi.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+#include <linux/arm-smccc.h>
+#include <asm/kvm_tmi.h>
+
+u64 tmi_version(void)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_VERSION, &res);
+ return res.a1;
+}
+
+u64 tmi_data_create(u64 data, u64 rd, u64 map_addr, u64 src, u64 level)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_DATA_CREATE, data, rd, map_addr, src, level, &res);
+ return res.a1;
+}
+
+u64 tmi_data_destroy(u64 rd, u64 map_addr, u64 level)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_DATA_DESTROY, rd, map_addr, level, &res);
+ return res.a1;
+}
+
+u64 tmi_cvm_activate(u64 rd)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_CVM_ACTIVATE, rd, &res);
+ return res.a1;
+}
+
+u64 tmi_cvm_create(u64 rd, u64 params_ptr)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_CVM_CREATE, rd, params_ptr, &res);
+ return res.a1;
+}
+
+u64 tmi_cvm_destroy(u64 rd)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_CVM_DESTROY, rd, &res);
+ return res.a1;
+}
+
+u64 tmi_tec_create(u64 tec, u64 rd, u64 mpidr, u64 params_ptr)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TEC_CREATE, tec, rd, mpidr, params_ptr, &res);
+ return res.a1;
+}
+
+u64 tmi_tec_destroy(u64 tec)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TEC_DESTROY, tec, &res);
+ return res.a1;
+}
+
+u64 tmi_tec_enter(u64 tec, u64 run_ptr)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TEC_ENTER, tec, run_ptr, &res);
+ return res.a1;
+}
+
+u64 tmi_ttt_create(u64 ttt, u64 rd, u64 map_addr, u64 level)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TTT_CREATE, ttt, rd, map_addr, level, &res);
+ return res.a1;
+}
+
+u64 tmi_ttt_destroy(u64 ttt, u64 rd, u64 map_addr, u64 level)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TTT_DESTROY, ttt, rd, map_addr, level, &res);
+ return res.a1;
+}
+
+u64 tmi_ttt_map_unprotected(u64 rd, u64 map_addr, u64 level, u64 ttte)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TTT_MAP_UNPROTECTED, rd, map_addr, level, ttte, &res);
+ return res.a1;
+}
+
+u64 tmi_ttt_unmap_unprotected(u64 rd, u64 map_addr, u64 level, u64 ns)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TTT_UNMAP_UNPROTECTED, rd, map_addr, level, ns, &res);
+ return res.a1;
+}
+
+u64 tmi_ttt_unmap_protected(u64 rd, u64 map_addr, u64 level)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TTT_UNMAP_PROTECTED, rd, map_addr, level, &res);
+ return res.a1;
+}
+
+u64 tmi_psci_complete(u64 calling_tec, u64 target_tec)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_PSCI_COMPLETE, calling_tec, target_tec, &res);
+ return res.a1;
+}
+
+u64 tmi_features(u64 index)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_FEATURES, index, &res);
+ return res.a1;
+}
+
+u64 tmi_mem_alloc(u64 rd, u64 numa_id, enum tmi_tmm_mem_type tmm_mem_type,
+ enum tmi_tmm_map_size tmm_map_size)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_MEM_ALLOC, rd, numa_id, tmm_mem_type, tmm_map_size, &res);
+ return res.a1;
+}
+
+u64 tmi_mem_free(u64 pa, u64 numa_id, enum tmi_tmm_mem_type tmm_mem_type,
+ enum tmi_tmm_map_size tmm_map_size)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_MEM_FREE, pa, numa_id, tmm_mem_type, tmm_map_size, &res);
+ return res.a1;
+}
+
+u64 tmi_ttt_map_range(u64 rd, u64 map_addr, u64 size, u64 cur_node, u64 target_node)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TTT_MAP_RANGE, rd, map_addr, size, cur_node, target_node, &res);
+ return res.a1;
+}
+
+u64 tmi_ttt_unmap_range(u64 rd, u64 map_addr, u64 size, u64 node_id)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TTT_UNMAP_RANGE, rd, map_addr, size, node_id, &res);
+ return res.a1;
+}
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 213afce81..c80730152 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -10,6 +10,10 @@
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
+
#include "vgic.h"
static bool group0_trap;
@@ -674,7 +678,10 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
kvm_vgic_global_state.vcpu_base = 0;
} else {
kvm_vgic_global_state.vcpu_base = info->vcpu.start;
- kvm_vgic_global_state.can_emulate_gicv2 = true;
+#ifdef CONFIG_CVM_HOST
+ if (!static_branch_unlikely(&kvm_cvm_is_available))
+#endif
+ kvm_vgic_global_state.can_emulate_gicv2 = true;
ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
if (ret) {
kvm_err("Cannot register GICv2 KVM device.\n");
@@ -735,7 +742,12 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
-
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ cpu_if->vgic_vmcr = ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)->tec_exit.gicv3_vmcr;
+ return;
+ }
+#endif
if (likely(cpu_if->vgic_sre))
cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
}
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index 116aa91d5..ef9ffea7a 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -12,6 +12,10 @@
#include <asm/kvm_hyp.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
+
#include "vgic.h"
#define CREATE_TRACE_POINTS
@@ -872,11 +876,42 @@ static inline bool can_access_vgic_from_kernel(void)
return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
}
+#ifdef CONFIG_CVM_HOST
+static inline void vgic_tmm_save_state(struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+ struct tmi_tec_run *tec_run = vcpu->arch.tec.tec_run;
+
+ for (i = 0; i < kvm_vgic_global_state.nr_lr; ++i) {
+ cpu_if->vgic_lr[i] = tec_run->tec_exit.gicv3_lrs[i];
+ tec_run->tec_entry.gicv3_lrs[i] = 0;
+ }
+}
+
+static inline void vgic_tmm_restore_state(struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+ struct tmi_tec_run *tec_run = vcpu->arch.tec.tec_run;
+
+ for (i = 0; i < kvm_vgic_global_state.nr_lr; ++i) {
+ tec_run->tec_entry.gicv3_lrs[i] = cpu_if->vgic_lr[i];
+ tec_run->tec_exit.gicv3_lrs[i] = cpu_if->vgic_lr[i];
+ }
+}
+#endif
+
static inline void vgic_save_state(struct kvm_vcpu *vcpu)
{
if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
vgic_v2_save_state(vcpu);
else
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ vgic_tmm_save_state(vcpu);
+ else
+#endif
__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
}
@@ -907,6 +942,13 @@ static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
vgic_v2_restore_state(vcpu);
else
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ vgic_tmm_restore_state(vcpu);
+ return;
+ }
+ else
+#endif
__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
}
@@ -948,7 +990,10 @@ void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
if (unlikely(!vgic_initialized(vcpu->kvm)))
return;
-
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ return;
+#endif
if (kvm_vgic_global_state.type == VGIC_V2)
vgic_v2_load(vcpu);
else
@@ -959,7 +1004,10 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
if (unlikely(!vgic_initialized(vcpu->kvm)))
return;
-
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ return;
+#endif
if (kvm_vgic_global_state.type == VGIC_V2)
vgic_v2_put(vcpu);
else
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 413d6f9bc..18ccd16fc 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -117,4 +117,8 @@ void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
u32 timer_get_ctl(struct arch_timer_context *ctxt);
u64 timer_get_cval(struct arch_timer_context *ctxt);
+#ifdef CONFIG_CVM_HOST
+/* Needed for S-EL2 */
+void kvm_cvm_timers_update(struct kvm_vcpu *vcpu);
+#endif
#endif
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 595c9da4f..1cb861d6c 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -345,6 +345,27 @@ struct kvm_vcpu {
struct kvm_vcpu_arch arch;
};
+#ifdef CONFIG_CVM_HOST
+#define KVM_TYPE_CVM_BIT 8
+#define CVM_MAX_HALT_POLL_NS 100000
+
+DECLARE_STATIC_KEY_FALSE(kvm_cvm_is_available);
+
+static __always_inline bool vcpu_is_tec(struct kvm_vcpu *vcpu)
+{
+ if (static_branch_unlikely(&kvm_cvm_is_available))
+ return vcpu->arch.tec.tec_run;
+ return false;
+}
+
+static inline bool kvm_arm_cvm_type(unsigned long type)
+{
+ return type & (1UL << KVM_TYPE_CVM_BIT);
+}
+
+#endif
+
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
/*
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index a1d8b1184..3332ee9ed 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1373,6 +1373,35 @@ struct kvm_master_dev_info {
#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
#define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
+#ifdef CONFIG_CVM_HOST
+#define KVM_LOAD_USER_DATA _IOW(KVMIO, 0x49, struct kvm_user_data)
+
+#define KVM_CAP_ARM_TMM 300 /* FIXME: Large number to prevent conflicts */
+#define MAX_NUMA_NODE 8
+#define MAX_CPU_BIT_MAP 0
+
+struct kvm_numa_node {
+ __u64 numa_id;
+ __u64 ipa_start;
+ __u64 ipa_size;
+ __s64 host_numa_node;
+ __u64 cpu_id[MAX_CPU_BIT_MAP];
+};
+
+struct kvm_numa_info {
+ __u64 numa_cnt;
+ struct kvm_numa_node numa_nodes[MAX_NUMA_NODE];
+};
+
+struct kvm_user_data {
+ __u64 loader_start;
+ __u64 initrd_start;
+ __u64 initrd_size;
+ __u64 ram_size;
+ struct kvm_numa_info numa_info;
+};
+#endif
+
/* enable ucontrol for s390 */
struct kvm_s390_ucas_mapping {
__u64 user_addr;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9166ef044..ef9f6d9df 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1089,7 +1089,12 @@ static struct kvm *kvm_create_vm(unsigned long type)
goto out_err_no_arch_destroy_vm;
}
- kvm->max_halt_poll_ns = halt_poll_ns;
+#ifdef CONFIG_CVM_HOST
+ if (kvm_arm_cvm_type(type))
+ kvm->max_halt_poll_ns = CVM_MAX_HALT_POLL_NS;
+ else
+#endif
+ kvm->max_halt_poll_ns = halt_poll_ns;
r = kvm_arch_init_vm(kvm, type);
if (r)
--
2.33.0
2
1
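As context for the uapi additions in the patch above, a hedged sketch of how a VMM might call the new KVM_LOAD_USER_DATA ioctl; all addresses and sizes are illustrative, not values taken from the patch:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int load_cvm_user_data(int vm_fd)
{
    struct kvm_user_data data;

    memset(&data, 0, sizeof(data));
    data.loader_start = 0x40000000UL;    /* illustrative guest IPA */
    data.initrd_start = 0x48000000UL;    /* illustrative */
    data.initrd_size  = 0x02000000UL;    /* illustrative: 32 MiB */
    data.ram_size     = 0x80000000UL;    /* illustrative: 2 GiB */
    data.numa_info.numa_cnt = 0;         /* no explicit NUMA layout */

    return ioctl(vm_fd, KVM_LOAD_USER_DATA, &data);
}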
Backport the following patches to fix CVE-2021-47037.
Reference: https://nvd.nist.gov/vuln/detail/CVE-2021-47037
Dmitry Baryshkov (1):
[Backport] ASoC: q6afe-clocks: fix reprobing of the driver
Srinivas Kandagatla (1):
[Backport] ASoC: q6afe-clocks: fix warning on symbol scope
sound/soc/qcom/qdsp6/q6afe-clocks.c | 209 ++++++++++++++--------------
sound/soc/qcom/qdsp6/q6afe.c | 2 +-
sound/soc/qcom/qdsp6/q6afe.h | 2 +-
3 files changed, 108 insertions(+), 105 deletions(-)
--
2.20.1
2
3
From: Jingxian He <hejingxian(a)huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CC0X
CVE: N/A
------------
Add lpi support for cvm guest os:
The gic-its driver's mapped memory must be shared with qemu/kvm, so the
cvm guest gic-its driver allocates that memory from the bounce buffer
(see the sketch below).
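In code terms, every allocation site in the driver gains the same dispatch, sketched here as a hypothetical helper; is_cvm_world() and its_alloc_shared_pages_node() are names from the diff below, while the wrapper itself does not exist in the patch:

/* Hypothetical helper illustrating the pattern repeated at each ITS
 * allocation site: bounce-buffer (shared) pages for cvm guests,
 * ordinary pages otherwise. */
static struct page *its_alloc_pages_maybe_shared(int node, gfp_t gfp,
                                                 unsigned int order)
{
#ifdef CONFIG_CVM_GUEST
    if (is_cvm_world())
        return its_alloc_shared_pages_node(node, gfp, order);
#endif
    return alloc_pages_node(node, gfp, order);
}

The free side picks its_free_shared_pages() or free_pages() the same way.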
Signed-off-by: Jingxian He <hejingxian(a)huawei.com>
Signed-off-by: wuweinan <wuweinan(a)huawei.com>
---
drivers/irqchip/irq-gic-v3-its.c | 228 ++++++++++++++++++++++++++++---
1 file changed, 207 insertions(+), 21 deletions(-)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 632444f86..a09cad8a0 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -29,6 +29,10 @@
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
+#ifdef CONFIG_CVM_GUEST
+#include <linux/swiotlb.h>
+#include <asm/cvm_guest.h>
+#endif
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
@@ -182,6 +186,90 @@ struct its_baser {
struct its_device;
+#ifdef CONFIG_CVM_GUEST
+static struct device cvm_alloc_device;
+static LIST_HEAD(cvm_its_nodes);
+static raw_spinlock_t cvm_its_lock;
+
+struct its_device_order {
+ struct its_device *dev;
+ struct list_head entry;
+ int itt_order;
+};
+
+static inline struct page *its_alloc_shared_pages_node(int node, gfp_t gfp,
+ unsigned int order)
+{
+ return swiotlb_alloc(&cvm_alloc_device, (1 << order) * PAGE_SIZE);
+}
+
+static inline struct page *its_alloc_shared_pages(gfp_t gfp, unsigned int order)
+{
+ return its_alloc_shared_pages_node(NUMA_NO_NODE, gfp, order);
+}
+
+static void its_free_shared_pages(void *addr, int order)
+{
+ if (order < 0)
+ return;
+
+ swiotlb_free(&cvm_alloc_device, virt_to_page(addr), (1 << order) * PAGE_SIZE);
+}
+
+static int add_its_device_order(struct its_device *dev, int itt_order)
+{
+ struct its_device_order *new;
+ unsigned long flags;
+
+ new = kmalloc(sizeof(struct its_device_order), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+ new->dev = dev;
+ new->itt_order = itt_order;
+ raw_spin_lock_irqsave(&cvm_its_lock, flags);
+ list_add_tail(&new->entry, &cvm_its_nodes);
+ raw_spin_unlock_irqrestore(&cvm_its_lock, flags);
+ return 0;
+}
+
+/* look up the itt order for this device and drop its tracking entry */
+static int get_its_device_order(struct its_device *dev)
+{
+ struct its_device_order *pos, *tmp;
+ unsigned long flags;
+ int itt_order = -1;
+
+ raw_spin_lock_irqsave(&cvm_its_lock, flags);
+ list_for_each_entry_safe(pos, tmp, &cvm_its_nodes, entry) {
+ if (pos->dev == dev) {
+ itt_order = pos->itt_order;
+ list_del(&pos->entry);
+ kfree(pos);
+ goto found;
+ }
+ }
+found:
+ raw_spin_unlock_irqrestore(&cvm_its_lock, flags);
+ return itt_order;
+}
+
+static void *its_alloc_shared_page_address(struct its_device *dev, struct its_node *its, int sz)
+{
+ struct page *page;
+ int itt_order;
+
+ itt_order = get_order(sz);
+ if (add_its_device_order(dev, itt_order))
+ return NULL;
+
+ page = its_alloc_shared_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
+ itt_order);
+ if (!page)
+ return NULL;
+ return page_address(page);
+}
+#endif
+
/*
* The ITS structure - contains most of the infrastructure, with the
* top-level MSI domain, the command queue, the collections, and the
@@ -2447,7 +2535,13 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags)
{
struct page *prop_page;
- prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ prop_page = its_alloc_shared_pages(gfp_flags,
+ get_order(LPI_PROPBASE_SZ));
+ else
+#endif
+ prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
if (!prop_page)
return NULL;
@@ -2458,8 +2552,14 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags)
static void its_free_prop_table(struct page *prop_page)
{
- free_pages((unsigned long)page_address(prop_page),
- get_order(LPI_PROPBASE_SZ));
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ its_free_shared_pages(page_address(prop_page),
+ get_order(LPI_PROPBASE_SZ));
+ else
+#endif
+ free_pages((unsigned long)page_address(prop_page),
+ get_order(LPI_PROPBASE_SZ));
}
static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
@@ -2581,7 +2681,13 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
order = get_order(GITS_BASER_PAGES_MAX * psz);
}
- page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ page = its_alloc_shared_pages_node(its->numa_node,
+ GFP_KERNEL | __GFP_ZERO, order);
+ else
+#endif
+ page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
if (!page)
return -ENOMEM;
@@ -2594,7 +2700,12 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
/* 52bit PA is supported only when PageSize=64K */
if (psz != SZ_64K) {
pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
- free_pages((unsigned long)base, order);
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ its_free_shared_pages(base, order);
+ else
+#endif
+ free_pages((unsigned long)base, order);
return -ENXIO;
}
@@ -2648,7 +2759,12 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
&its->phys_base, its_base_type_string[type],
val, tmp);
- free_pages((unsigned long)base, order);
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ its_free_shared_pages(base, order);
+ else
+#endif
+ free_pages((unsigned long)base, order);
return -ENXIO;
}
@@ -2787,8 +2903,14 @@ static void its_free_tables(struct its_node *its)
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
if (its->tables[i].base) {
- free_pages((unsigned long)its->tables[i].base,
- its->tables[i].order);
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ its_free_shared_pages(its->tables[i].base,
+ its->tables[i].order);
+ else
+#endif
+ free_pages((unsigned long)its->tables[i].base,
+ its->tables[i].order);
its->tables[i].base = NULL;
}
}
@@ -3051,7 +3173,13 @@ static bool allocate_vpe_l2_table(int cpu, u32 id)
/* Allocate memory for 2nd level table */
if (!table[idx]) {
- page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ page = its_alloc_shared_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(psz));
+ else
+#endif
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
if (!page)
return false;
@@ -3170,7 +3298,13 @@ static int allocate_vpe_l1_table(void)
pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
np, npg, psz, epp, esz);
- page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ page = its_alloc_shared_pages(GFP_ATOMIC | __GFP_ZERO,
+ get_order(np * PAGE_SIZE));
+ else
+#endif
+ page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
if (!page)
return -ENOMEM;
@@ -3218,8 +3352,14 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags)
{
struct page *pend_page;
- pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
- get_order(LPI_PENDBASE_SZ));
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ pend_page = its_alloc_shared_pages(gfp_flags | __GFP_ZERO,
+ get_order(LPI_PENDBASE_SZ));
+ else
+#endif
+ pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
+ get_order(LPI_PENDBASE_SZ));
if (!pend_page)
return NULL;
@@ -3231,7 +3371,13 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags)
static void its_free_pending_table(struct page *pt)
{
- free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ its_free_shared_pages(page_address(pt),
+ get_order(LPI_PENDBASE_SZ));
+ else
+#endif
+ free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
}
/*
@@ -3768,8 +3914,15 @@ static bool its_alloc_table_entry(struct its_node *its,
/* Allocate memory for 2nd level table */
if (!table[idx]) {
- page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
- get_order(baser->psz));
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ page = its_alloc_shared_pages_node(its->numa_node,
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(baser->psz));
+ else
+#endif
+ page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
+ get_order(baser->psz));
if (!page)
return false;
@@ -3872,7 +4025,12 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
nr_ites = max(2, nvecs);
sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
- itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ itt = its_alloc_shared_page_address(dev, its, sz);
+ else
+#endif
+ itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
if (alloc_lpis) {
lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
if (lpi_map)
@@ -3886,7 +4044,12 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
kfree(dev);
- kfree(itt);
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ its_free_shared_pages(itt, get_order(sz));
+ else
+#endif
+ kfree(itt);
kfree(lpi_map);
kfree(col_map);
return NULL;
@@ -3923,7 +4086,12 @@ static void its_free_device(struct its_device *its_dev)
list_del(&its_dev->entry);
raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
kfree(its_dev->event_map.col_map);
- kfree(its_dev->itt);
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ its_free_shared_pages(its_dev->itt, get_its_device_order(its_dev));
+ else
+#endif
+ kfree(its_dev->itt);
if (its_dev->is_vdev) {
WARN_ON(!rsv_devid_pool_cap);
@@ -5594,8 +5762,15 @@ static int __init its_probe_one(struct resource *res,
its->numa_node = numa_node;
- page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
- get_order(ITS_CMD_QUEUE_SZ));
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ page = its_alloc_shared_pages_node(its->numa_node,
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(ITS_CMD_QUEUE_SZ));
+ else
+#endif
+ page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
+ get_order(ITS_CMD_QUEUE_SZ));
if (!page) {
err = -ENOMEM;
goto out_unmap_sgir;
@@ -5661,7 +5836,12 @@ static int __init its_probe_one(struct resource *res,
out_free_tables:
its_free_tables(its);
out_free_cmd:
- free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ its_free_shared_pages(its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
+ else
+#endif
+ free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_unmap_sgir:
if (its->sgir_base)
iounmap(its->sgir_base);
@@ -5957,6 +6137,12 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
bool has_vtimer_irqbypass = false;
int err;
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world()) {
+ device_initialize(&cvm_alloc_device);
+ raw_spin_lock_init(&cvm_its_lock);
+ }
+#endif
gic_rdists = rdists;
its_parent = parent_domain;
--
2.33.0
1
0
08 Apr '24
From: Jingxian He <hejingxian(a)huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CC0X
CVE: N/A
------------
Add bounce buffer feature for cvm guest os:
1) Cvm guest mapped memory is secure memory.
2) Qemu/kvm cannot access the secure memory.
3) Use the bounce buffer as memory shared by the cvm guest and qemu/kvm
(see the sketch below).
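A minimal sketch of the resulting allocation path; is_swiotlb_for_alloc(), swiotlb_alloc() and set_cvm_memory_decrypted() are introduced by the diffs below, while the wrapper itself is hypothetical:

/* Hypothetical wrapper: obtain a buffer that qemu/kvm can also access. */
static void *cvm_alloc_shared_buffer(struct device *dev, size_t size)
{
    struct page *page;

    if (!is_swiotlb_for_alloc(dev))    /* true only when is_cvm_world() */
        return NULL;

    /* Slots come from the swiotlb pool, which the guest marked shared
     * at boot via set_cvm_memory_decrypted(). */
    page = swiotlb_alloc(dev, size);
    return page ? page_address(page) : NULL;
}

The guest opts in with cvm_guest=1 on the kernel command line, parsed by setup_cvm_guest() in the diff below.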
Signed-off-by: Jingxian He <hejingxian(a)huawei.com>
---
arch/arm64/configs/defconfig | 1 +
arch/arm64/configs/openeuler_defconfig | 1 +
arch/arm64/include/asm/cvm_guest.h | 21 ++++++
arch/arm64/kvm/Kconfig | 8 +++
arch/arm64/kvm/Makefile | 1 +
arch/arm64/kvm/cvm_guest.c | 90 ++++++++++++++++++++++++++
arch/arm64/mm/mmu.c | 11 ++++
arch/arm64/mm/pageattr.c | 9 ++-
include/linux/swiotlb.h | 13 ++++
kernel/dma/direct.c | 39 +++++++++++
kernel/dma/swiotlb.c | 86 +++++++++++++++++++++++-
11 files changed, 278 insertions(+), 2 deletions(-)
create mode 100644 arch/arm64/include/asm/cvm_guest.h
create mode 100644 arch/arm64/kvm/cvm_guest.c
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index ace2bf4ad..0ba4538d9 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -111,6 +111,7 @@ CONFIG_ACPI_APEI_EINJ=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=y
CONFIG_CVM_HOST=y
+CONFIG_CVM_GUEST=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index e298ca7e5..25a5fa5c7 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -763,6 +763,7 @@ CONFIG_IRQ_BYPASS_MANAGER=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=y
CONFIG_CVM_HOST=y
+CONFIG_CVM_GUEST=y
CONFIG_HAVE_KVM_IRQCHIP=y
CONFIG_HAVE_KVM_IRQFD=y
CONFIG_HAVE_KVM_IRQ_ROUTING=y
diff --git a/arch/arm64/include/asm/cvm_guest.h b/arch/arm64/include/asm/cvm_guest.h
new file mode 100644
index 000000000..3c5bda7ca
--- /dev/null
+++ b/arch/arm64/include/asm/cvm_guest.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+#ifndef __ASM_CVM_GUEST_H
+#define __ASM_CVM_GUEST_H
+
+#ifdef CONFIG_CVM_GUEST
+static inline bool cvm_mem_encrypt_active(void)
+{
+ return false;
+}
+
+int set_cvm_memory_encrypted(unsigned long addr, int numpages);
+
+int set_cvm_memory_decrypted(unsigned long addr, int numpages);
+
+bool is_cvm_world(void);
+
+#endif
+#endif
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 7c24a4d33..d21e27f74 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -54,6 +54,14 @@ config CVM_HOST
If unsure, say N.
+config CVM_GUEST
+ bool "CVM guest enable"
+ depends on KVM && SWIOTLB && ARM64
+ help
+ Support CVM guest based on S-EL2
+
+ If unsure, say N.
+
if KVM
source "virt/kvm/Kconfig"
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 3b92eaa4f..61dce3ab4 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -30,3 +30,4 @@ kvm-$(CONFIG_CVM_HOST) += cvm_exit.o
kvm-$(CONFIG_KVM_ARM_PMU) += pmu-emul.o
obj-$(CONFIG_KVM_HISI_VIRT) += hisilicon/
+obj-$(CONFIG_CVM_GUEST) += cvm_guest.o
diff --git a/arch/arm64/kvm/cvm_guest.c b/arch/arm64/kvm/cvm_guest.c
new file mode 100644
index 000000000..9df24af46
--- /dev/null
+++ b/arch/arm64/kvm/cvm_guest.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+
+#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
+#include <asm/tlbflush.h>
+
+static bool cvm_guest_enable __read_mostly;
+static int __init setup_cvm_guest(char *str)
+{
+ int ret;
+ unsigned int val;
+
+ if (!str)
+ return 0;
+
+ cvm_guest_enable = false;
+ ret = kstrtouint(str, 10, &val);
+ if (ret) {
+ pr_warn("Unable to parse cvm_guest.\n");
+ } else {
+ if (val)
+ cvm_guest_enable = true;
+ }
+ return ret;
+}
+early_param("cvm_guest", setup_cvm_guest);
+
+bool is_cvm_world(void)
+{
+ return cvm_guest_enable;
+}
+
+static int change_page_range_cvm(pte_t *ptep, unsigned long addr, void *data)
+{
+ bool encrypt = (bool)data;
+ pte_t pte = READ_ONCE(*ptep);
+
+ if (encrypt) {
+ if (!(pte.pte & 0x20))
+ return 0;
+ /* Clear NS BIT */
+ pte.pte = pte.pte & (~0x20);
+ } else {
+ if (pte.pte & 0x20)
+ return 0;
+ /* Set NS BIT */
+ pte.pte = pte.pte | 0x20;
+ }
+ set_pte(ptep, pte);
+
+ return 0;
+}
+
+static int __change_memory_common_cvm(unsigned long start, unsigned long size, bool encrypt)
+{
+ int ret;
+ ret = apply_to_page_range(&init_mm, start, size, change_page_range_cvm, (void *)encrypt);
+ flush_tlb_kernel_range(start, start + size);
+ return ret;
+}
+
+static int __set_memory_encrypted(unsigned long addr,
+ int numpages,
+ bool encrypt)
+{
+ if (!is_cvm_world())
+ return 0;
+ WARN_ON(!__is_lm_address(addr));
+ return __change_memory_common_cvm(addr, PAGE_SIZE * numpages, encrypt);
+}
+
+int set_cvm_memory_encrypted(unsigned long addr, int numpages)
+{
+ return __set_memory_encrypted(addr, numpages, true);
+}
+
+int set_cvm_memory_decrypted(unsigned long addr, int numpages)
+{
+ return __set_memory_encrypted(addr, numpages, false);
+}
+
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 804d5197c..b6eb82f6d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -38,6 +38,9 @@
#include <asm/ptdump.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
+#ifdef CONFIG_CVM_GUEST
+#include <asm/cvm_guest.h>
+#endif
#define NO_BLOCK_MAPPINGS BIT(0)
#define NO_CONT_MAPPINGS BIT(1)
@@ -494,7 +497,11 @@ static void __init map_mem(pgd_t *pgdp)
int flags = 0, eflags = 0;
u64 i;
+#ifdef CONFIG_CVM_GUEST
+ if (rodata_full || debug_pagealloc_enabled() || is_cvm_world())
+#else
if (rodata_full || debug_pagealloc_enabled())
+#endif
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
#ifdef CONFIG_KFENCE
@@ -1514,7 +1521,11 @@ int arch_add_memory(int nid, u64 start, u64 size,
}
+#ifdef CONFIG_CVM_GUEST
+ if (rodata_full || debug_pagealloc_enabled() || is_cvm_world())
+#else
if (rodata_full || debug_pagealloc_enabled())
+#endif
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 0bc12dbf2..fe0650386 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -11,6 +11,9 @@
#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
+#ifdef CONFIG_CVM_GUEST
+#include <asm/cvm_guest.h>
+#endif
struct page_change_data {
pgprot_t set_mask;
@@ -188,7 +191,11 @@ int set_direct_map_default_noflush(struct page *page)
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
- if (!debug_pagealloc_enabled() && !rodata_full)
+#ifdef CONFIG_CVM_GUEST
+ if ((!debug_pagealloc_enabled() && !rodata_full) || is_cvm_world())
+#else
+ if (!debug_pagealloc_enabled() && !rodata_full)
+#endif
return;
set_memory_valid((unsigned long)page_address(page), numpages, enable);
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 5d2dbe7e0..b4ca6622a 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -6,6 +6,9 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/limits.h>
+#ifdef CONFIG_CVM_GUEST
+#include <asm/cvm_guest.h>
+#endif
struct device;
struct page;
@@ -75,6 +78,16 @@ static inline bool is_swiotlb_buffer(phys_addr_t paddr)
return paddr >= io_tlb_start && paddr < io_tlb_end;
}
+#ifdef CONFIG_CVM_GUEST
+static inline bool is_swiotlb_for_alloc(struct device *dev)
+{
+ return is_cvm_world();
+}
+
+struct page *swiotlb_alloc(struct device *dev, size_t size);
+bool swiotlb_free(struct device *dev, struct page *page, size_t size);
+#endif
+
void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
size_t swiotlb_max_mapping_size(struct device *dev);
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 2922250f9..075e85cfb 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -75,6 +75,20 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}
+#ifdef CONFIG_CVM_GUEST
+static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
+{
+ struct page *page = swiotlb_alloc(dev, size);
+
+ if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+ swiotlb_free(dev, page, size);
+ return NULL;
+ }
+
+ return page;
+}
+#endif
+
static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
gfp_t gfp)
{
@@ -84,6 +98,11 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
WARN_ON_ONCE(!PAGE_ALIGNED(size));
+#ifdef CONFIG_CVM_GUEST
+ if (is_swiotlb_for_alloc(dev))
+ return dma_direct_alloc_swiotlb(dev, size);
+#endif
+
gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
&phys_limit);
page = dma_alloc_contiguous(dev, size, gfp);
@@ -237,6 +256,11 @@ void *dma_direct_alloc(struct device *dev, size_t size,
return NULL;
}
out_free_pages:
+#ifdef CONFIG_CVM_GUEST
+ if (is_swiotlb_for_alloc(dev) &&
+ swiotlb_free(dev, page, size))
+ return NULL;
+#endif
dma_free_contiguous(dev, page, size);
return NULL;
}
@@ -271,6 +295,11 @@ void dma_direct_free(struct device *dev, size_t size,
else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
arch_dma_clear_uncached(cpu_addr, size);
+#ifdef CONFIG_CVM_GUEST
+ if (is_swiotlb_for_alloc(dev) &&
+ swiotlb_free(dev, dma_direct_to_page(dev, dma_addr), size))
+ return;
+#endif
dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
}
@@ -307,6 +336,11 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
return page;
out_free_pages:
+#ifdef CONFIG_CVM_GUEST
+ if (is_swiotlb_for_alloc(dev) &&
+ swiotlb_free(dev, page, size))
+ return NULL;
+#endif
dma_free_contiguous(dev, page, size);
return NULL;
}
@@ -325,6 +359,11 @@ void dma_direct_free_pages(struct device *dev, size_t size,
if (force_dma_unencrypted(dev))
set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
+#ifdef CONFIG_CVM_GUEST
+ if (is_swiotlb_for_alloc(dev) &&
+ swiotlb_free(dev, page, size))
+ return;
+#endif
dma_free_contiguous(dev, page, size);
}
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index d897d1613..579d3cb50 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -47,6 +47,10 @@
#include <linux/memblock.h>
#include <linux/iommu-helper.h>
+#ifdef CONFIG_CVM_GUEST
+#include <asm/cvm_guest.h>
+#endif
+
#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>
@@ -194,12 +198,20 @@ void __init swiotlb_update_mem_attributes(void)
void *vaddr;
unsigned long bytes;
+#ifdef CONFIG_CVM_GUEST
+ if (!is_cvm_world() && (no_iotlb_memory || late_alloc))
+#else
if (no_iotlb_memory || late_alloc)
+#endif
return;
vaddr = phys_to_virt(io_tlb_start);
bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ set_cvm_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
+#endif
memset(vaddr, 0, bytes);
}
@@ -265,8 +277,13 @@ swiotlb_init(int verbose)
/* Get IO TLB memory from the low pages */
vstart = memblock_alloc_low(PAGE_ALIGN(bytes), PAGE_SIZE);
- if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
+ if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose)) {
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ swiotlb_update_mem_attributes();
+#endif
return;
+ }
if (io_tlb_start) {
memblock_free_early(io_tlb_start,
@@ -772,3 +789,70 @@ static int __init swiotlb_create_debugfs(void)
late_initcall(swiotlb_create_debugfs);
#endif
+
+#ifdef CONFIG_CVM_GUEST
+struct page *swiotlb_alloc(struct device *dev, size_t size)
+{
+ phys_addr_t tlb_addr;
+ int index;
+
+ index = find_slots(dev, 0, size);
+ if (index == -1)
+ return NULL;
+
+ tlb_addr = slot_addr(io_tlb_start, index);
+ return pfn_to_page(PFN_DOWN(tlb_addr));
+}
+
+static void swiotlb_release_slots(struct device *hwdev, phys_addr_t tlb_addr,
+ size_t alloc_size)
+{
+ unsigned long flags;
+ unsigned int offset = swiotlb_align_offset(hwdev, tlb_addr);
+ int i, count, nslots = nr_slots(alloc_size + offset);
+ int index = (tlb_addr - offset - io_tlb_start) >> IO_TLB_SHIFT;
+
+ /*
+ * Return the buffer to the free list by setting the corresponding
+ * entries to indicate the number of contiguous entries available.
+ * While returning the entries to the free list, we merge the entries
+ * with slots below and above the pool being returned.
+ */
+ spin_lock_irqsave(&io_tlb_lock, flags);
+ if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
+ count = io_tlb_list[index + nslots];
+ else
+ count = 0;
+
+ /*
+ * Step 1: return the slots to the free list, merging the slots with
+ * superceeding slots
+ */
+ for (i = index + nslots - 1; i >= index; i--) {
+ io_tlb_list[i] = ++count;
+ io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+ }
+
+ /*
+ * Step 2: merge the returned slots with the preceding slots, if
+ * available (non zero)
+ */
+ for (i = index - 1;
+ io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && io_tlb_list[i];
+ i--)
+ io_tlb_list[i] = ++count;
+ io_tlb_used -= nslots;
+ spin_unlock_irqrestore(&io_tlb_lock, flags);
+}
+
+bool swiotlb_free(struct device *dev, struct page *page, size_t size)
+{
+ phys_addr_t tlb_addr = page_to_phys(page);
+
+ if (!is_swiotlb_buffer(tlb_addr))
+ return false;
+
+ swiotlb_release_slots(dev, tlb_addr, size);
+ return true;
+}
+#endif
--
2.33.0
1
0
From: Jingxian He <hejingxian(a)huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CC0X
CVE: N/A
------------
Enable PMU physical interrupt injection for Confidential VMs.
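For orientation, a condensed sketch of the vcpu run-loop change made by the diffs below; enter_guest() is a hypothetical stand-in for the real entry path:

/* Sketch: mask the PMU physical IRQ while a TEC (cvm vcpu) runs,
 * then unmask it once the vcpu exits. */
static int run_tec_once(struct kvm_vcpu *vcpu)
{
    bool pmu_stopped = false;
    int ret;

    kvm_pmu_flush_hwstate(vcpu);
    if (vcpu_is_tec(vcpu) && vcpu->arch.pmu.irq_level) {
        pmu_stopped = true;
        arm_pmu_set_phys_irq(false);    /* disable this CPU's PMU irq */
    }

    ret = enter_guest(vcpu);            /* hypothetical entry path */

    if (pmu_stopped)
        arm_pmu_set_phys_irq(true);     /* re-enable it */
    return ret;
}

The overflow status is then read back from tec_exit.pmu_ovf_status instead of the PMU system registers, as the pmu-emul.c hunk shows.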
Signed-off-by: Jingxian He <hejingxian(a)huawei.com>
---
arch/arm64/include/asm/kvm_tmi.h | 5 ++++-
arch/arm64/kvm/arm.c | 24 ++++++++++++++++++++++++
arch/arm64/kvm/pmu-emul.c | 9 +++++++++
drivers/perf/arm_pmu.c | 17 +++++++++++++++++
include/linux/perf/arm_pmu.h | 3 +++
5 files changed, 57 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/kvm_tmi.h b/arch/arm64/include/asm/kvm_tmi.h
index 554b3e439..49ae4f77c 100644
--- a/arch/arm64/include/asm/kvm_tmi.h
+++ b/arch/arm64/include/asm/kvm_tmi.h
@@ -4,6 +4,7 @@
*/
#ifndef __TMM_TMI_H
#define __TMM_TMI_H
+#ifdef CONFIG_CVM_HOST
#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_pgtable.h>
@@ -144,6 +145,7 @@ struct tmi_tec_exit {
uint64_t cntp_ctl;
uint64_t cntp_cval;
uint64_t imm;
+ uint64_t pmu_ovf_status;
};
struct tmi_tec_run {
@@ -370,4 +372,5 @@ unsigned long cvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu,
int kvm_cvm_vcpu_set_events(struct kvm_vcpu *vcpu,
bool serror_pending, bool ext_dabt_pending);
-#endif
+#endif
+#endif
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 32974a10e..6790b06f9 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -40,6 +40,7 @@
#include <asm/sections.h>
#ifdef CONFIG_CVM_HOST
#include <asm/kvm_tmi.h>
+#include <linux/perf/arm_pmu.h>
#endif
#include <kvm/arm_hypercalls.h>
@@ -890,6 +891,18 @@ static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
xfer_to_guest_mode_work_pending();
}
+#ifdef CONFIG_CVM_HOST
+static inline void update_pmu_phys_irq(struct kvm_vcpu *vcpu, bool *pmu_stopped)
+{
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+
+ if (pmu->irq_level) {
+ *pmu_stopped = true;
+ arm_pmu_set_phys_irq(false);
+ }
+}
+#endif
+
/**
* kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
* @vcpu: The VCPU pointer
@@ -934,6 +947,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
ret = 1;
run->exit_reason = KVM_EXIT_UNKNOWN;
while (ret > 0) {
+#ifdef CONFIG_CVM_HOST
+ bool pmu_stopped = false;
+#endif
/*
* Check conditions before entering the guest
*/
@@ -953,6 +969,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
preempt_disable();
kvm_pmu_flush_hwstate(vcpu);
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ update_pmu_phys_irq(vcpu, &pmu_stopped);
+#endif
local_irq_disable();
@@ -1063,6 +1083,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
}
#endif
preempt_enable();
+#ifdef CONFIG_CVM_HOST
+ if (pmu_stopped)
+ arm_pmu_set_phys_irq(true);
+#endif
/*
* The ARMv8 architecture doesn't give the hypervisor
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 9fdc76c6d..00aa9ebe6 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -13,6 +13,7 @@
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>
+#include <asm/kvm_tmi.h>
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
@@ -370,6 +371,14 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
u64 reg = 0;
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+ reg = run->tec_exit.pmu_ovf_status;
+ return reg;
+ }
+#endif
+
if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 4ef8aee84..743f52d94 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -797,6 +797,23 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
}
+#ifdef CONFIG_CVM_HOST
+void arm_pmu_set_phys_irq(bool enable)
+{
+ int cpu = get_cpu();
+ struct arm_pmu *pmu = per_cpu(cpu_armpmu, cpu);
+ int irq;
+
+ irq = armpmu_get_cpu_irq(pmu, cpu);
+ if (irq && !enable)
+ per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);
+ else if (irq && enable)
+ per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);
+
+ put_cpu();
+}
+#endif
+
#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 6fd58c8f9..c7a35d321 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -189,6 +189,9 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
#endif
+#ifdef CONFIG_CVM_HOST
+void arm_pmu_set_phys_irq(bool enable);
+#endif
/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
struct arm_pmu *armpmu_alloc_atomic(void);
--
2.33.0
1
0
From: Jingxian He <hejingxian(a)huawei.com>
Add cvm feature patches:
1. add cvm host feature
2. enable pmu phys irq inject for cvm
3. add bounce buffer feature for cvm guest
4. add lpi support for cvm guest
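A rough sketch of how a VMM would opt a VM into the cvm host feature (item 1 above); KVM_TYPE_CVM_BIT (bit 8) comes from the series, the fds and values are illustrative:

#include <sys/ioctl.h>
#include <linux/kvm.h>

int create_cvm(int kvm_fd, unsigned long ipa_bits)
{
    /* Setting bit 8 makes kvm_arm_cvm_type() treat this VM as a cvm. */
    unsigned long type = ipa_bits | (1UL << 8);

    return ioctl(kvm_fd, KVM_CREATE_VM, type);
}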
arch/arm64/configs/defconfig | 2 +
arch/arm64/configs/openeuler_defconfig | 2 +
arch/arm64/include/asm/cvm_guest.h | 21 +
arch/arm64/include/asm/kvm_emulate.h | 14 +
arch/arm64/include/asm/kvm_host.h | 12 +
arch/arm64/include/asm/kvm_tmi.h | 376 +++++++++++
arch/arm64/include/asm/kvm_tmm.h | 72 +++
arch/arm64/kvm/Kconfig | 16 +
arch/arm64/kvm/Makefile | 5 +
arch/arm64/kvm/arch_timer.c | 104 +++-
arch/arm64/kvm/arm.c | 155 ++++-
arch/arm64/kvm/cvm.c | 824 +++++++++++++++++++++++++
arch/arm64/kvm/cvm_exit.c | 229 +++++++
arch/arm64/kvm/cvm_guest.c | 90 +++
arch/arm64/kvm/guest.c | 8 +
arch/arm64/kvm/hyp/vgic-v3-sr.c | 19 +
arch/arm64/kvm/mmio.c | 17 +-
arch/arm64/kvm/mmu.c | 7 +
arch/arm64/kvm/pmu-emul.c | 9 +
arch/arm64/kvm/psci.c | 12 +-
arch/arm64/kvm/reset.c | 10 +
arch/arm64/kvm/tmi.c | 148 +++++
arch/arm64/kvm/vgic/vgic-v3.c | 16 +-
arch/arm64/kvm/vgic/vgic.c | 52 +-
arch/arm64/mm/mmu.c | 11 +
arch/arm64/mm/pageattr.c | 9 +-
drivers/irqchip/irq-gic-v3-its.c | 228 ++++++-
drivers/perf/arm_pmu.c | 17 +
include/kvm/arm_arch_timer.h | 4 +
include/linux/kvm_host.h | 21 +
include/linux/perf/arm_pmu.h | 3 +
include/linux/swiotlb.h | 13 +
include/uapi/linux/kvm.h | 29 +
kernel/dma/direct.c | 39 ++
kernel/dma/swiotlb.c | 86 ++-
virt/kvm/kvm_main.c | 7 +-
36 files changed, 2646 insertions(+), 41 deletions(-)
create mode 100644 arch/arm64/include/asm/cvm_guest.h
create mode 100644 arch/arm64/include/asm/kvm_tmi.h
create mode 100644 arch/arm64/include/asm/kvm_tmm.h
create mode 100644 arch/arm64/kvm/cvm.c
create mode 100644 arch/arm64/kvm/cvm_exit.c
create mode 100644 arch/arm64/kvm/cvm_guest.c
create mode 100644 arch/arm64/kvm/tmi.c
--
2.33.0
1
0
08 Apr '24
From: Ryusuke Konishi <konishi.ryusuke(a)gmail.com>
stable inclusion
from stable-v5.10.210
commit 98a4026b22ff440c7f47056481bcbbe442f607d6
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I9E2EQ
CVE: CVE-2024-26696
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit 38296afe3c6ee07319e01bb249aa4bb47c07b534 upstream.
Syzbot reported a hang issue in migrate_pages_batch() called by mbind()
and nilfs_lookup_dirty_data_buffers() called in the log writer of nilfs2.
While migrate_pages_batch() locks a folio and waits for the writeback to
complete, the log writer thread that should bring the writeback to
completion picks up the folio being written back in
nilfs_lookup_dirty_data_buffers() that it calls for subsequent log
creation and was trying to lock the folio. Thus causing a deadlock.
In the first place, it is unexpected that folios/pages in the middle of
writeback will be updated and become dirty. Nilfs2 adds a checksum to
verify the validity of the log being written and uses it for recovery at
mount, so data changes during writeback are suppressed. Since this is
broken, an unclean shutdown could potentially cause recovery to fail.
Investigation revealed that the root cause is that the wait for writeback
completion in nilfs_page_mkwrite() is conditional, and if the backing
device does not require stable writes, data may be modified without
waiting.
Fix these issues by making nilfs_page_mkwrite() wait for writeback to
finish regardless of the stable write requirement of the backing device.
Link: https://lkml.kernel.org/r/20240131145657.4209-1-konishi.ryusuke@gmail.com
Fixes: 1d1d1a767206 ("mm: only enforce stable page writes if the backing device requires it")
Signed-off-by: Ryusuke Konishi <konishi.ryusuke(a)gmail.com>
Reported-by: syzbot+ee2ae68da3b22d04cd8d(a)syzkaller.appspotmail.com
Closes: https://lkml.kernel.org/r/00000000000047d819061004ad6c@google.com
Tested-by: Ryusuke Konishi <konishi.ryusuke(a)gmail.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Lin Yujun <linyujun809(a)huawei.com>
Signed-off-by: Yifan Qiao <qiaoyifan4(a)huawei.com>
---
fs/nilfs2/file.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 64bc81363c6c..3802b42e1cb4 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -105,7 +105,13 @@ static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
nilfs_transaction_commit(inode->i_sb);
mapped:
- wait_for_stable_page(page);
+ /*
+ * Since checksumming including data blocks is performed to determine
+ * the validity of the log to be written and used for recovery, it is
+ * necessary to wait for writeback to finish here, regardless of the
+ * stable write requirement of the backing device.
+ */
+ wait_on_page_writeback(page);
out:
sb_end_pagefault(inode->i_sb);
return block_page_mkwrite_return(ret);
--
2.39.2
2
1
[PATCH openEuler-1.0-LTS] nilfs2: fix hang in nilfs_lookup_dirty_data_buffers()
by Yifan Qiao 08 Apr '24
by Yifan Qiao 08 Apr '24
08 Apr '24
From: Ryusuke Konishi <konishi.ryusuke(a)gmail.com>
stable inclusion
from stable-v5.10.210
commit 98a4026b22ff440c7f47056481bcbbe442f607d6
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I9E2EQ
CVE: CVE-2024-26696
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit 38296afe3c6ee07319e01bb249aa4bb47c07b534 upstream.
Syzbot reported a hang issue in migrate_pages_batch() called by mbind()
and nilfs_lookup_dirty_data_buffers() called in the log writer of nilfs2.
While migrate_pages_batch() locks a folio and waits for the writeback to
complete, the log writer thread that should bring the writeback to
completion picks up the folio being written back in
nilfs_lookup_dirty_data_buffers() that it calls for subsequent log
creation and was trying to lock the folio. Thus causing a deadlock.
In the first place, it is unexpected that folios/pages in the middle of
writeback will be updated and become dirty. Nilfs2 adds a checksum to
verify the validity of the log being written and uses it for recovery at
mount, so data changes during writeback are suppressed. Since this is
broken, an unclean shutdown could potentially cause recovery to fail.
Investigation revealed that the root cause is that the wait for writeback
completion in nilfs_page_mkwrite() is conditional, and if the backing
device does not require stable writes, data may be modified without
waiting.
Fix these issues by making nilfs_page_mkwrite() wait for writeback to
finish regardless of the stable write requirement of the backing device.
Link: https://lkml.kernel.org/r/20240131145657.4209-1-konishi.ryusuke@gmail.com
Fixes: 1d1d1a767206 ("mm: only enforce stable page writes if the backing device requires it")
Signed-off-by: Ryusuke Konishi <konishi.ryusuke(a)gmail.com>
Reported-by: syzbot+ee2ae68da3b22d04cd8d(a)syzkaller.appspotmail.com
Closes: https://lkml.kernel.org/r/00000000000047d819061004ad6c@google.com
Tested-by: Ryusuke Konishi <konishi.ryusuke(a)gmail.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Lin Yujun <linyujun809(a)huawei.com>
Signed-off-by: Yifan Qiao <qiaoyifan4(a)huawei.com>
---
fs/nilfs2/file.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 64bc81363c6c..3802b42e1cb4 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -105,7 +105,13 @@ static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
nilfs_transaction_commit(inode->i_sb);
mapped:
- wait_for_stable_page(page);
+ /*
+ * Since checksumming including data blocks is performed to determine
+ * the validity of the log to be written and used for recovery, it is
+ * necessary to wait for writeback to finish here, regardless of the
+ * stable write requirement of the backing device.
+ */
+ wait_on_page_writeback(page);
out:
sb_end_pagefault(inode->i_sb);
return block_page_mkwrite_return(ret);
--
2.39.2
2
2
08 Apr '24
From: Ryusuke Konishi <konishi.ryusuke(a)gmail.com>
stable inclusion
from stable-v5.10.210
commit 98a4026b22ff440c7f47056481bcbbe442f607d6
category: bugfix
bugzilla: 189665
CVE: CVE-2024-26696
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit 38296afe3c6ee07319e01bb249aa4bb47c07b534 upstream.
Syzbot reported a hang issue in migrate_pages_batch() called by mbind()
and nilfs_lookup_dirty_data_buffers() called in the log writer of nilfs2.
While migrate_pages_batch() locks a folio and waits for the writeback to
complete, the log writer thread that should bring the writeback to
completion picks up the folio being written back in
nilfs_lookup_dirty_data_buffers() that it calls for subsequent log
creation and was trying to lock the folio. Thus causing a deadlock.
In the first place, it is unexpected that folios/pages in the middle of
writeback will be updated and become dirty. Nilfs2 adds a checksum to
verify the validity of the log being written and uses it for recovery at
mount, so data changes during writeback are suppressed. Since this is
broken, an unclean shutdown could potentially cause recovery to fail.
Investigation revealed that the root cause is that the wait for writeback
completion in nilfs_page_mkwrite() is conditional, and if the backing
device does not require stable writes, data may be modified without
waiting.
Fix these issues by making nilfs_page_mkwrite() wait for writeback to
finish regardless of the stable write requirement of the backing device.
Link: https://lkml.kernel.org/r/20240131145657.4209-1-konishi.ryusuke@gmail.com
Fixes: 1d1d1a767206 ("mm: only enforce stable page writes if the backing device requires it")
Signed-off-by: Ryusuke Konishi <konishi.ryusuke(a)gmail.com>
Reported-by: syzbot+ee2ae68da3b22d04cd8d(a)syzkaller.appspotmail.com
Closes: https://lkml.kernel.org/r/00000000000047d819061004ad6c@google.com
Tested-by: Ryusuke Konishi <konishi.ryusuke(a)gmail.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Lin Yujun <linyujun809(a)huawei.com>
Signed-off-by: Yifan Qiao <qiaoyifan4(a)huawei.com>
---
fs/nilfs2/file.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 64bc81363c6c..3802b42e1cb4 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -105,7 +105,13 @@ static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
nilfs_transaction_commit(inode->i_sb);
mapped:
- wait_for_stable_page(page);
+ /*
+ * Since checksumming including data blocks is performed to determine
+ * the validity of the log to be written and used for recovery, it is
+ * necessary to wait for writeback to finish here, regardless of the
+ * stable write requirement of the backing device.
+ */
+ wait_on_page_writeback(page);
out:
sb_end_pagefault(inode->i_sb);
return block_page_mkwrite_return(ret);
--
2.39.2
2
1
[PATCH openEuler-1.0-LTS] nilfs2: fix hang in nilfs_lookup_dirty_data_buffers()
by Yifan Qiao 08 Apr '24
by Yifan Qiao 08 Apr '24
08 Apr '24
From: Ryusuke Konishi <konishi.ryusuke(a)gmail.com>
stable inclusion
from stable-v5.10.210
commit 98a4026b22ff440c7f47056481bcbbe442f607d6
category: bugfix
bugzilla: 189665
CVE: CVE-2024-26696
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit 38296afe3c6ee07319e01bb249aa4bb47c07b534 upstream.
Syzbot reported a hang issue in migrate_pages_batch() called by mbind()
and nilfs_lookup_dirty_data_buffers() called in the log writer of nilfs2.
While migrate_pages_batch() locks a folio and waits for the writeback to
complete, the log writer thread that should bring the writeback to
completion picks up the folio being written back in
nilfs_lookup_dirty_data_buffers() that it calls for subsequent log
creation and was trying to lock the folio. Thus causing a deadlock.
In the first place, it is unexpected that folios/pages in the middle of
writeback will be updated and become dirty. Nilfs2 adds a checksum to
verify the validity of the log being written and uses it for recovery at
mount, so data changes during writeback are suppressed. Since this is
broken, an unclean shutdown could potentially cause recovery to fail.
Investigation revealed that the root cause is that the wait for writeback
completion in nilfs_page_mkwrite() is conditional, and if the backing
device does not require stable writes, data may be modified without
waiting.
Fix these issues by making nilfs_page_mkwrite() wait for writeback to
finish regardless of the stable write requirement of the backing device.
Link: https://lkml.kernel.org/r/20240131145657.4209-1-konishi.ryusuke@gmail.com
Fixes: 1d1d1a767206 ("mm: only enforce stable page writes if the backing device requires it")
Signed-off-by: Ryusuke Konishi <konishi.ryusuke(a)gmail.com>
Reported-by: syzbot+ee2ae68da3b22d04cd8d(a)syzkaller.appspotmail.com
Closes: https://lkml.kernel.org/r/00000000000047d819061004ad6c@google.com
Tested-by: Ryusuke Konishi <konishi.ryusuke(a)gmail.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Lin Yujun <linyujun809(a)huawei.com>
Signed-off-by: Yifan Qiao <qiaoyifan4(a)huawei.com>
---
fs/nilfs2/file.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 64bc81363c6c..3802b42e1cb4 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -105,7 +105,13 @@ static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf)
nilfs_transaction_commit(inode->i_sb);
mapped:
- wait_for_stable_page(page);
+ /*
+ * Since checksumming including data blocks is performed to determine
+ * the validity of the log to be written and used for recovery, it is
+ * necessary to wait for writeback to finish here, regardless of the
+ * stable write requirement of the backing device.
+ */
+ wait_on_page_writeback(page);
out:
sb_end_pagefault(inode->i_sb);
return block_page_mkwrite_return(ret);
--
2.39.2
2
1
From: Dan Carpenter <dan.carpenter(a)linaro.org>
stable inclusion
from stable-v5.15.81
commit ae4acad41b0f93f1c26cc0fc9135bb79d8282d0b
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I9DNXE
CVE: CVE-2023-52631
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=…
--------------------------------
[ Upstream commit b2dd7b953c25ffd5912dda17e980e7168bebcf6c ]
The issue here is when this is called from ntfs_load_attr_list(). The
"size" comes from le32_to_cpu(attr->res.data_size) so it can't overflow
on 64-bit systems, but on 32-bit systems the "+ 1023" can overflow and
the result is zero. This means that the kmalloc will succeed by
returning the ZERO_SIZE_PTR and then the memcpy() will crash with an
Oops on the next line.
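As a concrete illustration of the wraparound (the input value is hypothetical, chosen only to trigger the overflow on a 32-bit size_t):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t size = 0xfffffc01u;   /* hypothetical 32-bit data_size */
    uint32_t aligned = (size + 1023) & ~(uint32_t)1023;

    /* 0xfffffc01 + 0x3ff == 0x100000000, which truncates to 0, so
     * kmalloc(0) returns ZERO_SIZE_PTR and the later memcpy() oopses. */
    printf("aligned = %u\n", aligned);   /* prints: aligned = 0 */
    return 0;
}

With size_add() the sum saturates at SIZE_MAX instead, so the oversized allocation simply fails.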
Fixes: be71b5cba2e6 ("fs/ntfs3: Add attrib operations")
Signed-off-by: Dan Carpenter <dan.carpenter(a)linaro.org>
Signed-off-by: Konstantin Komarov <almaz.alexandrovich(a)paragon-software.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Long Li <leo.lilong(a)huawei.com>
---
fs/ntfs3/ntfs_fs.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index 7e84f0060133..e0c8f59d62ab 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -466,7 +466,7 @@ bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
int al_update(struct ntfs_inode *ni, int sync);
static inline size_t al_aligned(size_t size)
{
- return (size + 1023) & ~(size_t)1023;
+ return size_add(size, 1023) & ~(size_t)1023;
}
/* Globals from bitfunc.c */
--
2.31.1
[PATCH OLK-6.6 00/51] ext4: use iomap for regular file's buffered IO path and enable large folio
by Zhang Yi 08 Apr '24
This series converts the ext4 buffered IO path from buffer_head to
iomap, and enables large folio support by default.
01-14: iomap: map multiple blocks per ->map_blocks call, by Christoph,
backported from [1].
15: a small debug improvement for the previous series in iomap
map_blocks [2].
16-24: fix a stale zero-data issue in xfs and make iomap_zero_range()
not increase i_size [3].
25-29: the first part of the preparatory changes, already merged
upstream [4].
30-36: the second part of the preparatory changes, adding support for
inserting multiple delalloc blocks [5].
37-51: convert buffer_head to iomap; these are picked up from my v3
series [6].
[1] https://lore.kernel.org/linux-fsdevel/20231207072710.176093-1-hch@lst.de/
[2] https://lore.kernel.org/linux-fsdevel/20240220115759.3445025-1-yi.zhang@hua…
[3] https://lore.kernel.org/linux-xfs/20240320110548.2200662-1-yi.zhang@huaweic…
[4] https://lore.kernel.org/linux-ext4/20240105033018.1665752-1-yi.zhang@huawei…
[5] https://lore.kernel.org/linux-ext4/20240330120236.3789589-1-yi.zhang@huawei…
[6] https://lore.kernel.org/linux-ext4/20240127015825.1608160-1-yi.zhang@huawei…
Thanks,
Yi.
Christoph Hellwig (14):
iomap: clear the per-folio dirty bits on all writeback failures
iomap: treat inline data in iomap_writepage_map as an I/O error
iomap: move the io_folios field out of struct iomap_ioend
iomap: move the PF_MEMALLOC check to iomap_writepages
iomap: factor out a iomap_writepage_handle_eof helper
iomap: move all remaining per-folio logic into iomap_writepage_map
iomap: clean up the iomap_alloc_ioend calling convention
iomap: move the iomap_sector sector calculation out of
iomap_add_to_ioend
iomap: don't chain bios
iomap: only call mapping_set_error once for each failed bio
iomap: factor out a iomap_writepage_map_block helper
iomap: submit ioends immediately
iomap: map multiple blocks at a time
iomap: pass the length of the dirty region to ->map_blocks
Zhang Yi (37):
iomap: add pos and dirty_len into trace_iomap_writepage_map
xfs: match lock mode in xfs_buffered_write_iomap_begin()
xfs: make the seq argument to xfs_bmapi_convert_delalloc() optional
xfs: make xfs_bmapi_convert_delalloc() to allocate the target offset
xfs: convert delayed extents to unwritten when zeroing post eof blocks
iomap: drop the write failure handles when unsharing and zeroing
iomap: don't increase i_size if it's not a write operation
iomap: use a new variable to handle the written bytes in
iomap_write_iter()
iomap: make iomap_write_end() return a boolean
iomap: do some small logical cleanup in buffered write
ext4: refactor ext4_da_map_blocks()
ext4: convert to exclusive lock while inserting delalloc extents
ext4: add a hole extent entry in cache after punch
ext4: make ext4_map_blocks() distinguish delalloc only extent
ext4: make ext4_set_iomap() recognize IOMAP_DELALLOC map type
ext4: trim delalloc extent
ext4: drop iblock parameter
ext4: make ext4_es_insert_delayed_block() insert multi-blocks
ext4: make ext4_da_reserve_space() reserve multi-clusters
ext4: factor out check for whether a cluster is allocated
ext4: make ext4_insert_delayed_block() insert multi-blocks
ext4: make ext4_da_map_blocks() buffer_head unaware
ext4: use reserved metadata blocks when splitting extent on endio
ext4: factor out ext4_map_{create|query}_blocks()
ext4: introduce seq counter for the extent status entry
ext4: add a new iomap aops for regular file's buffered IO path
ext4: implement buffered read iomap path
ext4: implement buffered write iomap path
ext4: implement writeback iomap path
ext4: implement mmap iomap path
ext4: implement zero_range iomap path
ext4: writeback partial blocks before zeroing out range
ext4: fall back to buffer_head path for defrag
ext4: partial enable iomap for regular file's buffered IO path
filemap: support disable large folios on active inode
ext4: enable large folio for regular file with iomap buffered IO path
ext4: add mount option for buffered IO iomap path
block/fops.c | 2 +-
fs/ext4/ext4.h | 15 +-
fs/ext4/ext4_jbd2.c | 6 +
fs/ext4/extents.c | 42 +-
fs/ext4/extents_status.c | 76 ++-
fs/ext4/extents_status.h | 5 +-
fs/ext4/file.c | 19 +-
fs/ext4/ialloc.c | 5 +
fs/ext4/inode.c | 935 +++++++++++++++++++++++++++---------
fs/ext4/move_extent.c | 35 ++
fs/ext4/page-io.c | 107 +++++
fs/ext4/super.c | 21 +
fs/gfs2/bmap.c | 2 +-
fs/iomap/buffered-io.c | 683 +++++++++++++-------------
fs/iomap/trace.h | 43 +-
fs/xfs/libxfs/xfs_bmap.c | 40 +-
fs/xfs/xfs_aops.c | 63 +--
fs/xfs/xfs_iomap.c | 39 +-
fs/zonefs/file.c | 3 +-
include/linux/iomap.h | 19 +-
include/linux/pagemap.h | 14 +
include/trace/events/ext4.h | 42 +-
mm/readahead.c | 6 +-
23 files changed, 1550 insertions(+), 672 deletions(-)
--
2.39.2
[PATCH OLK-6.6] firmware: arm_sdei: Move sdei_cpuhp_up/down() before lockup_detector_online_cpu()
by Xiongfeng Wang 08 Apr '24
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I9EYSX
--------------------------------
commit 58c81b6ed03f ("firmware: arm_sdei: Fix sleep from invalid context
BUG") moved sdei_cpuhp_up/down() after lockup_detector_online_cpu().
sdei_watchdog is enabled in lockup_detector_online_cpu(), but enabling
it fails because it now runs before sdei_cpuhp_up(). This commit moves
sdei_cpuhp_up() back before lockup_detector_online_cpu() by registering
SDEI at a fixed CPU hotplug state instead of a dynamic one.
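For context, online callbacks registered at fixed enum cpuhp_state
positions run in ascending enum order as a CPU comes up, while
CPUHP_AP_ONLINE_DYN states are allocated after all fixed states. A
minimal sketch of the ordering this patch establishes (the enum
placement is the one added below):
/* include/linux/cpuhotplug.h: callbacks run bottom-up on CPU online */
enum cpuhp_state {
	/* ... */
	CPUHP_AP_ARM_SDEI_ONLINE,	/* sdei_cpuhp_up() runs first */
	CPUHP_AP_WATCHDOG_ONLINE,	/* then lockup_detector_online_cpu() */
	/* ... */
};
Because the state is now fixed, cpuhp_setup_state() returns 0 on success
rather than a dynamically allocated state number, which is why the
"err < 0" checks and the sdei_hp_state bookkeeping can be dropped.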
Signed-off-by: Xiongfeng Wang <wangxiongfeng2(a)huawei.com>
---
drivers/firmware/arm_sdei.c | 21 +++++++--------------
include/linux/cpuhotplug.h | 1 +
2 files changed, 8 insertions(+), 14 deletions(-)
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index 0f7ef69071c0..fe638e40aebb 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -43,8 +43,6 @@ static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
/* entry point from firmware to arch asm code */
static unsigned long sdei_entry_point;
-static int sdei_hp_state;
-
struct sdei_event {
/* These three are protected by the sdei_list_lock */
struct list_head list;
@@ -785,7 +783,7 @@ static int sdei_device_freeze(struct device *dev)
int err;
/* unregister private events */
- cpuhp_remove_state(sdei_entry_point);
+ cpuhp_remove_state(CPUHP_AP_ARM_SDEI_ONLINE);
err = sdei_unregister_shared();
if (err)
@@ -806,15 +804,12 @@ static int sdei_device_thaw(struct device *dev)
return err;
}
- err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
+ err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_ONLINE, "SDEI",
&sdei_cpuhp_up, &sdei_cpuhp_down);
- if (err < 0) {
+ if (err)
pr_warn("Failed to re-register CPU hotplug notifier...\n");
- return err;
- }
- sdei_hp_state = err;
- return 0;
+ return err;
}
static int sdei_device_restore(struct device *dev)
@@ -846,7 +841,7 @@ static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
* We are going to reset the interface, after this there is no point
* doing work when we take CPUs offline.
*/
- cpuhp_remove_state(sdei_hp_state);
+ cpuhp_remove_state(CPUHP_AP_ARM_SDEI_ONLINE);
sdei_platform_reset();
@@ -1026,15 +1021,13 @@ static int sdei_probe(struct platform_device *pdev)
goto remove_cpupm;
}
- err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
+ err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_ONLINE, "SDEI",
&sdei_cpuhp_up, &sdei_cpuhp_down);
- if (err < 0) {
+ if (err) {
pr_warn("Failed to register CPU hotplug notifier...\n");
goto remove_reboot;
}
- sdei_hp_state = err;
-
return 0;
remove_reboot:
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index f94a1b8e34e0..a33500a53d25 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -246,6 +246,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE,
CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE,
CPUHP_AP_PERF_CSKY_ONLINE,
+ CPUHP_AP_ARM_SDEI_ONLINE,
CPUHP_AP_WATCHDOG_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RANDOM_ONLINE,
--
2.20.1
From: Jingxian He <hejingxian(a)huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CC0X
CVE: N/A
------------
Add host support for Confidential VMs (cVMs):
1) Add a new kvm_type for cVMs.
2) Initialize cVM-related data when userspace creates a VM with the cVM
type.
3) Add a cVM hypervisor, named TMM, which runs at S-EL2.
4) Have KVM call the TMM interface to create the cVM stage-2 page tables
and run the cVM.
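A minimal sketch of how userspace might drive the new capability,
assuming a VM created with the cVM kvm_type and using the
KVM_CAP_ARM_TMM stages defined in this patch (error handling omitted;
the exact uapi constants come from the headers this patch extends):
struct kvm_cap_arm_tmm_config_item cfg = {
	.cfg = KVM_CAP_ARM_TMM_CFG_PMU,
	.num_pmu_cntrs = 4,
};
struct kvm_enable_cap cap = {
	.cap = KVM_CAP_ARM_TMM,
	.args = { KVM_CAP_ARM_TMM_CONFIG_CVM_HOST,
		  (__u64)(unsigned long)&cfg },
};

ioctl(vm_fd, KVM_ENABLE_CAP, &cap);	/* configure the cVM */

cap.args[0] = KVM_CAP_ARM_TMM_CREATE_CVM;
ioctl(vm_fd, KVM_ENABLE_CAP, &cap);	/* creates it via tmi_cvm_create() */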
Signed-off-by: Jingxian He <hejingxian(a)huawei.com>
---
arch/arm64/configs/defconfig | 1 +
arch/arm64/configs/openeuler_defconfig | 1 +
arch/arm64/include/asm/kvm_emulate.h | 14 +
arch/arm64/include/asm/kvm_host.h | 12 +
arch/arm64/include/asm/kvm_tmi.h | 373 +++++++++++
arch/arm64/include/asm/kvm_tmm.h | 72 +++
arch/arm64/kvm/Kconfig | 8 +
arch/arm64/kvm/Makefile | 4 +
arch/arm64/kvm/arch_timer.c | 104 +++-
arch/arm64/kvm/arm.c | 131 +++-
arch/arm64/kvm/cvm.c | 824 +++++++++++++++++++++++++
arch/arm64/kvm/cvm_exit.c | 229 +++++++
arch/arm64/kvm/guest.c | 8 +
arch/arm64/kvm/hyp/vgic-v3-sr.c | 19 +
arch/arm64/kvm/mmio.c | 17 +-
arch/arm64/kvm/mmu.c | 7 +
arch/arm64/kvm/psci.c | 12 +-
arch/arm64/kvm/reset.c | 10 +
arch/arm64/kvm/tmi.c | 148 +++++
arch/arm64/kvm/vgic/vgic-v3.c | 16 +-
arch/arm64/kvm/vgic/vgic.c | 52 +-
include/kvm/arm_arch_timer.h | 4 +
include/linux/kvm_host.h | 21 +
include/uapi/linux/kvm.h | 29 +
virt/kvm/kvm_main.c | 7 +-
25 files changed, 2105 insertions(+), 18 deletions(-)
create mode 100644 arch/arm64/include/asm/kvm_tmi.h
create mode 100644 arch/arm64/include/asm/kvm_tmm.h
create mode 100644 arch/arm64/kvm/cvm.c
create mode 100644 arch/arm64/kvm/cvm_exit.c
create mode 100644 arch/arm64/kvm/tmi.c
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index d025bafcc..ace2bf4ad 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -110,6 +110,7 @@ CONFIG_ACPI_APEI_MEMORY_FAILURE=y
CONFIG_ACPI_APEI_EINJ=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=y
+CONFIG_CVM_HOST=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 5ad5e4378..e298ca7e5 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -762,6 +762,7 @@ CONFIG_ACPI_PPTT=y
CONFIG_IRQ_BYPASS_MANAGER=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=y
+CONFIG_CVM_HOST=y
CONFIG_HAVE_KVM_IRQCHIP=y
CONFIG_HAVE_KVM_IRQFD=y
CONFIG_HAVE_KVM_IRQ_ROUTING=y
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index fb3e3f613..ab1aebd1f 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -556,4 +556,18 @@ static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
return test_bit(feature, vcpu->arch.features);
}
+#ifdef CONFIG_CVM_HOST
+static inline bool kvm_is_cvm(struct kvm *kvm)
+{
+ if (static_branch_unlikely(&kvm_cvm_is_available)) {
+ return kvm->arch.is_cvm;
+ }
+ return false;
+}
+
+static inline enum cvm_state kvm_cvm_state(struct kvm *kvm)
+{
+ return READ_ONCE(kvm->arch.cvm.state);
+}
+#endif
#endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 8bb67dfb9..01b8f9331 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -26,6 +26,9 @@
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/thread_info.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmm.h>
+#endif
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -127,6 +130,11 @@ struct kvm_arch {
cpumask_t *dvm_cpumask; /* Union of all vcpu's cpus_ptr */
u64 lsudvmbm_el2;
#endif
+
+#ifdef CONFIG_CVM_HOST
+ struct cvm cvm;
+ bool is_cvm;
+#endif
};
struct kvm_vcpu_fault_info {
@@ -405,6 +413,10 @@ struct kvm_vcpu_arch {
cpumask_t *cpus_ptr;
cpumask_t *pre_cpus_ptr;
#endif
+
+#ifdef CONFIG_CVM_HOST
+ struct cvm_tec tec;
+#endif
};
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
diff --git a/arch/arm64/include/asm/kvm_tmi.h b/arch/arm64/include/asm/kvm_tmi.h
new file mode 100644
index 000000000..554b3e439
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_tmi.h
@@ -0,0 +1,373 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+#ifndef __TMM_TMI_H
+#define __TMM_TMI_H
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_pgtable.h>
+#include <linux/virtio_ring.h>
+
+#define GRANULE_SIZE 4096
+
+#define NO_NUMA -1
+
+#define TMM_TTT_LEVEL_3 3
+
+#ifdef CONFIG_CVM_HOST_FVP_PLAT
+#define CVM_MEM_BASE ULL(0x8800000000) /* choose FVP platform to run cVM */
+#define VQ_NUM 3
+#else
+#define CVM_MEM_BASE ULL(0x800000000) /* choose qemu platform to run cVM */
+#define VQ_NUM 3
+#endif
+
+#define MEM_SEG_NUMS 2
+
+/* defined in QEMU hw/arm/virt.c */
+#define VIRT_PCIE_MMIO 0x10000000 /* 256MB */
+#define VIRT_PCIE_MMIO_SIZE 0x1000000 /* 16MB */
+#define VIRT_HIGH_PCIE_ECAM 0x8000000000 /* 512GB */
+#define VIRT_HIGH_PCIE_ECAM_SIZE 0x12000000 /* 288MB */
+
+/* TMI error codes. */
+#define TMI_SUCCESS 0
+#define TMI_ERROR_INPUT 1
+#define TMI_ERROR_MEMORY 2
+#define TMI_ERROR_ALIAS 3
+#define TMI_ERROR_IN_USE 4
+#define TMI_ERROR_CVM_STATE 5
+#define TMI_ERROR_OWNER 6
+#define TMI_ERROR_TEC 7
+#define TMI_ERROR_TTT_WALK 8
+#define TMI_ERROR_TTT_ENTRY 9
+#define TMI_ERROR_NOT_SUPPORTED 10
+#define TMI_ERROR_INTERNAL 11
+#define TMI_ERROR_CVM_POWEROFF 12
+
+#define TMI_RETURN_STATUS(ret) ((ret) & 0xFF)
+#define TMI_RETURN_INDEX(ret) (((ret) >> 8) & 0xFF)
+
+#define TMI_FEATURE_REGISTER_0_S2SZ GENMASK(7, 0)
+#define TMI_FEATURE_REGISTER_0_LPA2 BIT(8)
+#define TMI_FEATURE_REGISTER_0_SVE_EN BIT(9)
+#define TMI_FEATURE_REGISTER_0_SVE_VL GENMASK(13, 10)
+#define TMI_FEATURE_REGISTER_0_NUM_BPS GENMASK(17, 14)
+#define TMI_FEATURE_REGISTER_0_NUM_WPS GENMASK(21, 18)
+#define TMI_FEATURE_REGISTER_0_PMU_EN BIT(22)
+#define TMI_FEATURE_REGISTER_0_PMU_NUM_CTRS GENMASK(27, 23)
+#define TMI_FEATURE_REGISTER_0_HASH_SHA_256 BIT(28)
+#define TMI_FEATURE_REGISTER_0_HASH_SHA_512 BIT(29)
+
+#define TMI_CVM_PARAM_FLAG_LPA2 BIT(0)
+#define TMI_CVM_PARAM_FLAG_SVE BIT(1)
+#define TMI_CVM_PARAM_FLAG_PMU BIT(2)
+
+/*
+ * Many of these fields are smaller than u64 but all fields have u64
+ * alignment, so use u64 to ensure correct alignment.
+ */
+typedef struct tmi_cvm_params {
+ u64 flags;
+ u64 s2sz;
+ u64 sve_vl;
+ u64 num_bps;
+ u64 num_wps;
+ u64 pmu_num_cnts;
+ u64 measurement_algo;
+ u64 vmid;
+ u64 ns_vtcr;
+ u64 vttbr_el2;
+ u64 ttt_base;
+ s64 ttt_level_start;
+ u64 ttt_num_start;
+ u8 rpv[64]; /* Bits 512 */
+} tmi_cvm_params_t;
+
+#define TMI_NOT_RUNNABLE 0
+#define TMI_RUNNABLE 1
+
+/*
+ * The number of GPRs (starting from X0) that are
+ * configured by the host when a TEC is created.
+ */
+#define TEC_CREATE_NR_GPRS (8U)
+
+struct tmi_tec_params {
+ uint64_t gprs[TEC_CREATE_NR_GPRS];
+ uint64_t pc;
+ uint64_t flags;
+ uint64_t ram_size;
+};
+
+#define TEC_ENTRY_FLAG_EMUL_MMIO (1UL << 0U)
+#define TEC_ENTRY_FLAG_INJECT_SEA (1UL << 1U)
+#define TEC_ENTRY_FLAG_TRAP_WFI (1UL << 2U)
+#define TEC_ENTRY_FLAG_TRAP_WFE (1UL << 3U)
+
+#define TMI_EXIT_SYNC 0
+#define TMI_EXIT_IRQ 1
+#define TMI_EXIT_FIQ 2
+#define TMI_EXIT_PSCI 3
+#define TMI_EXIT_HOST_CALL 5
+#define TMI_EXIT_SERROR 6
+
+/*
+ * The number of GPRs (starting from X0) per voluntary exit context.
+ * Per SMCCC.
+ */
+#define TEC_EXIT_NR_GPRS (31U)
+
+/* Maximum number of Interrupt Controller List Registers. */
+#define TEC_GIC_NUM_LRS (16U)
+
+struct tmi_tec_entry {
+ uint64_t flags;
+ uint64_t gprs[TEC_EXIT_NR_GPRS];
+ uint64_t gicv3_lrs[TEC_GIC_NUM_LRS];
+ uint64_t gicv3_hcr;
+};
+
+struct tmi_tec_exit {
+ uint64_t exit_reason;
+ uint64_t esr;
+ uint64_t far;
+ uint64_t hpfar;
+ uint64_t gprs[TEC_EXIT_NR_GPRS];
+ uint64_t gicv3_hcr;
+ uint64_t gicv3_lrs[TEC_GIC_NUM_LRS];
+ uint64_t gicv3_misr;
+ uint64_t gicv3_vmcr;
+ uint64_t cntv_ctl;
+ uint64_t cntv_cval;
+ uint64_t cntp_ctl;
+ uint64_t cntp_cval;
+ uint64_t imm;
+};
+
+struct tmi_tec_run {
+ struct tmi_tec_entry tec_entry;
+ struct tmi_tec_exit tec_exit;
+};
+
+#define TMI_FNUM_MIN_VALUE U(0x150)
+#define TMI_FNUM_MAX_VALUE U(0x18F)
+
+/******************************************************************************
+ * Bit definitions inside the function id as per the SMC calling convention
+ ******************************************************************************/
+#define FUNCID_TYPE_SHIFT 31
+#define FUNCID_CC_SHIFT 30
+#define FUNCID_OEN_SHIFT 24
+#define FUNCID_NUM_SHIFT 0
+
+#define FUNCID_TYPE_MASK 0x1
+#define FUNCID_CC_MASK 0x1
+#define FUNCID_OEN_MASK 0x3f
+#define FUNCID_NUM_MASK 0xffff
+
+#define FUNCID_TYPE_WIDTH 1
+#define FUNCID_CC_WIDTH 1
+#define FUNCID_OEN_WIDTH 6
+#define FUNCID_NUM_WIDTH 16
+
+#define SMC_64 1
+#define SMC_32 0
+#define SMC_TYPE_FAST 1
+#define SMC_TYPE_STD 0
+
+/*****************************************************************************
+ * Owning entity number definitions inside the function id as per the SMC
+ * calling convention
+ *****************************************************************************/
+#define OEN_ARM_START 0
+#define OEN_ARM_END 0
+#define OEN_CPU_START 1
+#define OEN_CPU_END 1
+#define OEN_SIP_START 2
+#define OEN_SIP_END 2
+#define OEN_OEM_START 3
+#define OEN_OEM_END 3
+#define OEN_STD_START 4 /* Standard Calls */
+#define OEN_STD_END 4
+#define OEN_TAP_START 48 /* Trusted Applications */
+#define OEN_TAP_END 49
+#define OEN_TOS_START 50 /* Trusted OS */
+#define OEN_TOS_END 63
+#define OEN_LIMIT 64
+
+/* Get TMI fastcall std FID from function number */
+#define TMI_FID(smc_cc, func_num) \
+ ((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) | \
+ ((smc_cc) << FUNCID_CC_SHIFT) | \
+ (OEN_STD_START << FUNCID_OEN_SHIFT) | \
+ ((func_num) << FUNCID_NUM_SHIFT))
+
+#define U(_x) (_x##U)
+
+/*
+ * SMC_TMM_INIT_COMPLETE is the only function in the TMI that originates from
+ * the CVM world and is handled by the SPMD. The remaining functions are
+ * always invoked by the Normal world, forwarded by the SPMD and handled by
+ * the TMM.
+ */
+#define TMI_FNUM_VERSION U(0x260)
+#define TMI_FNUM_MEM_ALLOC U(0x261)
+#define TMI_FNUM_MEM_FREE U(0x262)
+#define TMI_FNUM_DATA_CREATE U(0x263)
+#define TMI_FNUM_DATA_DESTROY U(0x265)
+#define TMI_FNUM_CVM_ACTIVATE U(0x267)
+#define TMI_FNUM_CVM_CREATE U(0x268)
+#define TMI_FNUM_CVM_DESTROY U(0x269)
+#define TMI_FNUM_TEC_CREATE U(0x27A)
+#define TMI_FNUM_TEC_DESTROY U(0x27B)
+#define TMI_FNUM_TEC_ENTER U(0x27C)
+#define TMI_FNUM_TTT_CREATE U(0x27D)
+#define TMI_FNUM_TTT_DESTROY U(0x27E)
+#define TMI_FNUM_TTT_MAP_UNPROTECTED U(0x27F)
+#define TMI_FNUM_TTT_MAP_PROTECTED U(0x280)
+#define TMI_FNUM_TTT_UNMAP_UNPROTECTED U(0x282)
+#define TMI_FNUM_TTT_UNMAP_PROTECTED U(0x283)
+#define TMI_FNUM_PSCI_COMPLETE U(0x284)
+#define TMI_FNUM_FEATURES U(0x285)
+#define TMI_FNUM_TTT_MAP_RANGE U(0x286)
+#define TMI_FNUM_TTT_UNMAP_RANGE U(0x287)
+
+/* TMI SMC64 PIDs handled by the SPMD */
+#define TMI_TMM_VESION TMI_FID(SMC_64, TMI_FNUM_VERSION)
+#define TMI_TMM_DATA_CREATE TMI_FID(SMC_64, TMI_FNUM_DATA_CREATE)
+#define TMI_TMM_DATA_DESTROY TMI_FID(SMC_64, TMI_FNUM_DATA_DESTROY)
+#define TMI_TMM_CVM_ACTIVATE TMI_FID(SMC_64, TMI_FNUM_CVM_ACTIVATE)
+#define TMI_TMM_CVM_CREATE TMI_FID(SMC_64, TMI_FNUM_CVM_CREATE)
+#define TMI_TMM_CVM_DESTROY TMI_FID(SMC_64, TMI_FNUM_CVM_DESTROY)
+#define TMI_TMM_TEC_CREATE TMI_FID(SMC_64, TMI_FNUM_TEC_CREATE)
+#define TMI_TMM_TEC_DESTROY TMI_FID(SMC_64, TMI_FNUM_TEC_DESTROY)
+#define TMI_TMM_TEC_ENTER TMI_FID(SMC_64, TMI_FNUM_TEC_ENTER)
+#define TMI_TMM_TTT_CREATE TMI_FID(SMC_64, TMI_FNUM_TTT_CREATE)
+#define TMI_TMM_TTT_DESTROY TMI_FID(SMC_64, TMI_FNUM_TTT_DESTROY)
+#define TMI_TMM_TTT_MAP_UNPROTECTED TMI_FID(SMC_64, TMI_FNUM_TTT_MAP_UNPROTECTED)
+#define TMI_TMM_TTT_MAP_PROTECTED TMI_FID(SMC_64, TMI_FNUM_TTT_MAP_PROTECTED)
+#define TMI_TMM_TTT_UNMAP_UNPROTECTED TMI_FID(SMC_64, TMI_FNUM_TTT_UNMAP_UNPROTECTED)
+#define TMI_TMM_TTT_UNMAP_PROTECTED TMI_FID(SMC_64, TMI_FNUM_TTT_UNMAP_PROTECTED)
+#define TMI_TMM_PSCI_COMPLETE TMI_FID(SMC_64, TMI_FNUM_PSCI_COMPLETE)
+#define TMI_TMM_FEATURES TMI_FID(SMC_64, TMI_FNUM_FEATURES)
+#define TMI_TMM_MEM_ALLOC TMI_FID(SMC_64, TMI_FNUM_MEM_ALLOC)
+#define TMI_TMM_MEM_FREE TMI_FID(SMC_64, TMI_FNUM_MEM_FREE)
+#define TMI_TMM_TTT_MAP_RANGE TMI_FID(SMC_64, TMI_FNUM_TTT_MAP_RANGE)
+#define TMI_TMM_TTT_UNMAP_RANGE TMI_FID(SMC_64, TMI_FNUM_TTT_UNMAP_RANGE)
+
+#define TMI_ABI_VERSION_GET_MAJOR(_version) ((_version) >> 16)
+#define TMI_ABI_VERSION_GET_MINOR(_version) ((_version) & 0xFFFF)
+
+#define TMI_ABI_VERSION_MAJOR U(0x0)
+
+/* KVM_CAP_ARM_TMM on VM fd */
+#define KVM_CAP_ARM_TMM_CONFIG_CVM_HOST 0
+#define KVM_CAP_ARM_TMM_CREATE_CVM 1
+#define KVM_CAP_ARM_TMM_INIT_IPA_CVM 2
+#define KVM_CAP_ARM_TMM_POPULATE_CVM 3
+#define KVM_CAP_ARM_TMM_ACTIVATE_CVM 4
+
+#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA256 0
+#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA512 1
+
+#define KVM_CAP_ARM_TMM_RPV_SIZE 64
+
+/* List of configuration items accepted for KVM_CAP_ARM_TMM_CONFIG_CVM_HOST */
+#define KVM_CAP_ARM_TMM_CFG_RPV 0
+#define KVM_CAP_ARM_TMM_CFG_HASH_ALGO 1
+#define KVM_CAP_ARM_TMM_CFG_SVE 2
+#define KVM_CAP_ARM_TMM_CFG_DBG 3
+#define KVM_CAP_ARM_TMM_CFG_PMU 4
+
+DECLARE_STATIC_KEY_FALSE(kvm_cvm_is_available);
+DECLARE_STATIC_KEY_FALSE(kvm_cvm_is_enable);
+
+struct kvm_cap_arm_tmm_config_item {
+ __u32 cfg;
+ union {
+ /* cfg == KVM_CAP_ARM_TMM_CFG_RPV */
+ struct {
+ __u8 rpv[KVM_CAP_ARM_TMM_RPV_SIZE];
+ };
+
+ /* cfg == KVM_CAP_ARM_TMM_CFG_HASH_ALGO */
+ struct {
+ __u32 hash_algo;
+ };
+
+ /* cfg == KVM_CAP_ARM_TMM_CFG_SVE */
+ struct {
+ __u32 sve_vq;
+ };
+
+ /* cfg == KVM_CAP_ARM_TMM_CFG_DBG */
+ struct {
+ __u32 num_brps;
+ __u32 num_wrps;
+ };
+
+ /* cfg == KVM_CAP_ARM_TMM_CFG_PMU */
+ struct {
+ __u32 num_pmu_cntrs;
+ };
+ /* Fix the size of the union */
+ __u8 reserved[256];
+ };
+};
+
+enum tmi_tmm_mem_type {
+ TMM_MEM_TYPE_RD,
+ TMM_MEM_TYPE_TEC,
+ TMM_MEM_TYPE_TTT,
+ TMM_MEM_TYPE_CVM_PA,
+};
+
+enum tmi_tmm_map_size {
+ TMM_MEM_MAP_SIZE_4K,
+ TMM_MEM_MAP_SIZE_2M,
+ TMM_MEM_MAP_SIZE_1G,
+ TMM_MEM_MAP_SIZE_MAX,
+};
+
+static inline bool tmm_is_addr_ttt_level_aligned(uint64_t addr, int level)
+{
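+	/* The shift count reaches 39 at level 0, so a 64-bit shift is needed */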
+	uint64_t mask = (1UL << (12 + 9 * (3 - level))) - 1;
+ return (addr & mask) == 0;
+}
+
+u64 phys_to_cvm_phys(u64 phys);
+
+u64 tmi_version(void);
+u64 tmi_data_create(u64 data, u64 rd, u64 map_addr, u64 src, u64 level);
+u64 tmi_data_destroy(u64 rd, u64 map_addr, u64 level);
+u64 tmi_cvm_activate(u64 rd);
+u64 tmi_cvm_create(u64 rd, u64 params_ptr);
+u64 tmi_cvm_destroy(u64 rd);
+u64 tmi_tec_create(u64 tec, u64 rd, u64 mpidr, u64 params_ptr);
+u64 tmi_tec_destroy(u64 tec);
+u64 tmi_tec_enter(u64 tec, u64 run_ptr);
+u64 tmi_ttt_create(u64 ttt, u64 rd, u64 map_addr, u64 level);
+u64 tmi_ttt_destroy(u64 ttt, u64 rd, u64 map_addr, u64 level);
+u64 tmi_ttt_map_unprotected(u64 rd, u64 map_addr, u64 level, u64 ttte);
+u64 tmi_ttt_unmap_unprotected(u64 rd, u64 map_addr, u64 level, u64 ns);
+u64 tmi_ttt_unmap_protected(u64 rd, u64 map_addr, u64 level);
+u64 tmi_psci_complete(u64 calling_tec, u64 target_tec);
+u64 tmi_features(u64 index);
+u64 tmi_ttt_map_range(u64 rd, u64 map_addr, u64 size, u64 cur_node, u64 target_node);
+u64 tmi_ttt_unmap_range(u64 rd, u64 map_addr, u64 size, u64 node_id);
+
+u64 tmi_mem_alloc(u64 rd, u64 numa_id, enum tmi_tmm_mem_type tmm_mem_type,
+ enum tmi_tmm_map_size tmm_map_size);
+u64 tmi_mem_free(u64 pa, u64 numa_id, enum tmi_tmm_mem_type tmm_mem_type,
+ enum tmi_tmm_map_size tmm_map_size);
+
+void kvm_cvm_vcpu_put(struct kvm_vcpu *vcpu);
+int kvm_load_user_data(struct kvm *kvm, unsigned long arg);
+unsigned long cvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu,
+ unsigned long target_affinity, unsigned long lowest_affinity_level);
+int kvm_cvm_vcpu_set_events(struct kvm_vcpu *vcpu,
+ bool serror_pending, bool ext_dabt_pending);
+
+#endif
diff --git a/arch/arm64/include/asm/kvm_tmm.h b/arch/arm64/include/asm/kvm_tmm.h
new file mode 100644
index 000000000..41383494f
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_tmm.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+#ifndef __ASM_KVM_TMM_H
+#define __ASM_KVM_TMM_H
+
+#include <uapi/linux/kvm.h>
+
+enum cvm_state {
+ CVM_STATE_NONE,
+ CVM_STATE_NEW,
+ CVM_STATE_ACTIVE,
+ CVM_STATE_DYING
+};
+
+struct cvm {
+ enum cvm_state state;
+ u32 cvm_vmid;
+ u64 rd;
+ u64 loader_start;
+ u64 initrd_start;
+ u64 initrd_size;
+ u64 ram_size;
+ struct kvm_numa_info numa_info;
+ struct tmi_cvm_params *params;
+};
+
+/*
+ * struct cvm_tec - Additional per VCPU data for a CVM
+ */
+struct cvm_tec {
+ u64 tec;
+ bool tec_created;
+ void *tec_run;
+};
+
+int kvm_init_tmm(void);
+int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap);
+int kvm_init_cvm_vm(struct kvm *kvm);
+void kvm_destroy_cvm(struct kvm *kvm);
+int kvm_create_tec(struct kvm_vcpu *vcpu);
+void kvm_destroy_tec(struct kvm_vcpu *vcpu);
+int kvm_tec_enter(struct kvm_vcpu *vcpu);
+int handle_cvm_exit(struct kvm_vcpu *vcpu, int rec_run_status);
+int kvm_arm_create_cvm(struct kvm *kvm);
+void kvm_free_rd(struct kvm *kvm);
+int cvm_create_rd(struct kvm *kvm);
+int kvm_arm_cvm_first_run(struct kvm_vcpu *vcpu);
+int cvm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target);
+int kvm_arch_tec_init(struct kvm_vcpu *vcpu);
+
+void kvm_cvm_unmap_destroy_range(struct kvm *kvm);
+
+#define CVM_TTT_BLOCK_LEVEL 2
+#define CVM_TTT_MAX_LEVEL 3
+
+#define CVM_PAGE_SHIFT 12
+#define CVM_PAGE_SIZE BIT(CVM_PAGE_SHIFT)
+#define CVM_TTT_LEVEL_SHIFT(l) \
+ ((CVM_PAGE_SHIFT - 3) * (4 - (l)) + 3)
+#define CVM_L2_BLOCK_SIZE BIT(CVM_TTT_LEVEL_SHIFT(2))
+
+static inline unsigned long cvm_ttt_level_mapsize(int level)
+{
+ if (WARN_ON(level > CVM_TTT_BLOCK_LEVEL))
+ return CVM_PAGE_SIZE;
+
+ return (1UL << CVM_TTT_LEVEL_SHIFT(level));
+}
+
+#endif
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index d984a6041..7c24a4d33 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -46,6 +46,14 @@ menuconfig KVM
If unsure, say N.
+config CVM_HOST
+ bool "CVM_Enabled"
+ depends on KVM && ARM64
+ help
+ Support CVM based on S-EL2
+
+ If unsure, say N.
+
if KVM
source "virt/kvm/Kconfig"
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 395d65165..3b92eaa4f 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -24,5 +24,9 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
vgic/vgic-its.o vgic/shadow_dev.o vgic/vgic-debug.o
+kvm-$(CONFIG_CVM_HOST) += tmi.o
+kvm-$(CONFIG_CVM_HOST) += cvm.o
+kvm-$(CONFIG_CVM_HOST) += cvm_exit.o
+
kvm-$(CONFIG_KVM_ARM_PMU) += pmu-emul.o
obj-$(CONFIG_KVM_HISI_VIRT) += hisilicon/
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index f9d6a5cd4..a2443d8da 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -16,6 +16,10 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
+
#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
@@ -138,10 +142,79 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
}
}
+#ifdef CONFIG_CVM_HOST
+static bool cvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
+{
+ return timer_ctx &&
+ ((timer_get_ctl(timer_ctx) &
+ (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
+}
+
+void kvm_cvm_timers_update(struct kvm_vcpu *vcpu)
+{
+ int i;
+ u64 cval, now;
+ bool status, level;
+ struct arch_timer_context *timer;
+ struct arch_timer_cpu *arch_timer = &vcpu->arch.timer_cpu;
+
+ for (i = 0; i < NR_KVM_TIMERS; i++) {
+ timer = &arch_timer->timers[i];
+
+ if (!timer->loaded) {
+ if (!cvm_timer_irq_can_fire(timer))
+ continue;
+ cval = timer_get_cval(timer);
+ now = kvm_phys_timer_read() - timer_get_offset(timer);
+ level = (cval <= now);
+ kvm_timer_update_irq(vcpu, level, timer);
+ } else {
+ status = timer_get_ctl(timer) & ARCH_TIMER_CTRL_IT_STAT;
+ level = cvm_timer_irq_can_fire(timer) && status;
+ if (level != timer->irq.level)
+ kvm_timer_update_irq(vcpu, level, timer);
+ }
+ }
+}
+
+static void set_cvm_timers_loaded(struct kvm_vcpu *vcpu, bool loaded)
+{
+ int i;
+ struct arch_timer_cpu *arch_timer = &vcpu->arch.timer_cpu;
+
+ for (i = 0; i < NR_KVM_TIMERS; i++) {
+ struct arch_timer_context *timer = &arch_timer->timers[i];
+ timer->loaded = loaded;
+ }
+}
+
+static void kvm_timer_blocking(struct kvm_vcpu *vcpu);
+static void kvm_timer_unblocking(struct kvm_vcpu *vcpu);
+
+static inline void cvm_vcpu_load_timer_callback(struct kvm_vcpu *vcpu)
+{
+ kvm_cvm_timers_update(vcpu);
+ kvm_timer_unblocking(vcpu);
+ set_cvm_timers_loaded(vcpu, true);
+}
+
+static inline void cvm_vcpu_put_timer_callback(struct kvm_vcpu *vcpu)
+{
+ set_cvm_timers_loaded(vcpu, false);
+ if (rcuwait_active(kvm_arch_vcpu_get_wait(vcpu)))
+ kvm_timer_blocking(vcpu);
+}
+#endif
+
static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
{
struct kvm_vcpu *vcpu = ctxt->vcpu;
+#ifdef CONFIG_CVM_HOST
+ if (kvm_is_cvm(vcpu->kvm))
+ return;
+#endif
+
switch(arch_timer_ctx_index(ctxt)) {
case TIMER_VTIMER:
__vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
@@ -667,6 +740,13 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct timer_map map;
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ cvm_vcpu_load_timer_callback(vcpu);
+ return;
+ }
+#endif
+
if (unlikely(!timer->enabled))
return;
@@ -752,6 +832,13 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
struct timer_map map;
struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ cvm_vcpu_put_timer_callback(vcpu);
+ return;
+ }
+#endif
+
if (unlikely(!timer->enabled))
return;
@@ -898,7 +985,12 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
ptimer->vcpu = vcpu;
/* Synchronize cntvoff across all vtimers of a VM. */
- update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
+#ifdef CONFIG_CVM_HOST
+ if (kvm_is_cvm(vcpu->kvm))
+ update_vtimer_cntvoff(vcpu, 0);
+ else
+#endif
+ update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
timer_set_offset(ptimer, 0);
hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
@@ -1356,6 +1448,16 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
return -EINVAL;
}
+#ifdef CONFIG_CVM_HOST
+ /*
+ * We don't use mapped IRQs for CVM because the TMI doesn't allow
+ * us setting the LR.HW bit in the VGIC.
+ */
+ if (vcpu_is_tec(vcpu)) {
+ return 0;
+ }
+#endif
+
get_timer_map(vcpu, &map);
if (vtimer_is_irqbypass())
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 718f6060b..32974a10e 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -38,6 +38,9 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/sections.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
#include <kvm/arm_hypercalls.h>
#include <kvm/arm_pmu.h>
@@ -108,6 +111,12 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
r = 0;
kvm->arch.return_nisv_io_abort_to_user = true;
break;
+#ifdef CONFIG_CVM_HOST
+ case KVM_CAP_ARM_TMM:
+ if (static_branch_unlikely(&kvm_cvm_is_available))
+ r = kvm_cvm_enable_cap(kvm, cap);
+ break;
+#endif
default:
r = -EINVAL;
break;
@@ -149,13 +158,29 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
return ret;
#endif
+#ifdef CONFIG_CVM_HOST
+ if (kvm_arm_cvm_type(type)) {
+ ret = cvm_create_rd(kvm);
+ if (ret)
+ return ret;
+ }
+#endif
+
ret = kvm_arm_setup_stage2(kvm, type);
if (ret)
+#ifdef CONFIG_CVM_HOST
+ goto out_free_rd;
+#else
return ret;
+#endif
ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu);
if (ret)
+#ifdef CONFIG_CVM_HOST
+ goto out_free_rd;
+#else
return ret;
+#endif
ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
if (ret)
@@ -167,10 +192,21 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();
set_default_csv2(kvm);
+#ifdef CONFIG_CVM_HOST
+ if (kvm_arm_cvm_type(type)) {
+ ret = kvm_init_cvm_vm(kvm);
+ if (ret)
+ goto out_free_stage2_pgd;
+ }
+#endif
return ret;
out_free_stage2_pgd:
kvm_free_stage2_pgd(&kvm->arch.mmu);
+#ifdef CONFIG_CVM_HOST
+out_free_rd:
+ kvm_free_rd(kvm);
+#endif
return ret;
}
@@ -203,6 +239,10 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
}
}
atomic_set(&kvm->online_vcpus, 0);
+#ifdef CONFIG_CVM_HOST
+ if (kvm_is_cvm(kvm))
+ kvm_destroy_cvm(kvm);
+#endif
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -258,11 +298,21 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = 1;
break;
case KVM_CAP_STEAL_TIME:
+#ifdef CONFIG_CVM_HOST
+ if (kvm && kvm_is_cvm(kvm))
+ r = 0;
+ else
+#endif
r = kvm_arm_pvtime_supported();
break;
case KVM_CAP_ARM_VIRT_MSI_BYPASS:
r = sdev_enable;
break;
+#ifdef CONFIG_CVM_HOST
+ case KVM_CAP_ARM_TMM:
+ r = static_key_enabled(&kvm_cvm_is_available);
+ break;
+#endif
default:
r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
break;
@@ -358,6 +408,13 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
return err;
#endif
+#ifdef CONFIG_CVM_HOST
+ if (kvm_is_cvm(vcpu->kvm)) {
+ err = kvm_arch_tec_init(vcpu);
+ if (err)
+ return err;
+ }
+#endif
return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
}
@@ -444,8 +501,23 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu->cpu = cpu;
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ if (single_task_running())
+ vcpu_clear_wfx_traps(vcpu);
+ else
+ vcpu_set_wfx_traps(vcpu);
+ }
+#endif
kvm_vgic_load(vcpu);
kvm_timer_vcpu_load(vcpu);
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
+ kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
+ return;
+ }
+#endif
if (has_vhe())
kvm_vcpu_load_sysregs_vhe(vcpu);
kvm_arch_vcpu_load_fp(vcpu);
@@ -472,6 +544,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ kvm_cvm_vcpu_put(vcpu);
+ return;
+ }
+#endif
kvm_arch_vcpu_put_fp(vcpu);
if (has_vhe())
kvm_vcpu_put_sysregs_vhe(vcpu);
@@ -662,6 +740,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
* Tell the rest of the code that there are userspace irqchip
* VMs in the wild.
*/
+#ifdef CONFIG_CVM_HOST
+ if (!kvm_is_cvm(kvm))
+#endif
static_branch_inc(&userspace_irqchip_in_use);
}
@@ -830,7 +911,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
ret = kvm_vcpu_first_run_init(vcpu);
if (ret)
return ret;
-
+#ifdef CONFIG_CVM_HOST
+ if (kvm_is_cvm(vcpu->kvm)) {
+ ret = kvm_arm_cvm_first_run(vcpu);
+ if (ret)
+ return ret;
+ }
+#endif
if (run->exit_reason == KVM_EXIT_MMIO) {
ret = kvm_handle_mmio_return(vcpu);
if (ret)
@@ -905,8 +992,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
*/
trace_kvm_entry(vcpu->vcpu_id, *vcpu_pc(vcpu));
guest_enter_irqoff();
-
- ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ ret = kvm_tec_enter(vcpu);
+ else
+#endif
+ ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu->stat.exits++;
@@ -961,11 +1052,16 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
* guest time.
*/
guest_exit();
- trace_kvm_exit(vcpu->vcpu_id, ret, *vcpu_pc(vcpu));
-
- /* Exit types that need handling before we can be preempted */
- handle_exit_early(vcpu, ret);
+#ifdef CONFIG_CVM_HOST
+ if (!vcpu_is_tec(vcpu)) {
+#endif
+ trace_kvm_exit(vcpu->vcpu_id, ret, *vcpu_pc(vcpu));
+ /* Exit types that need handling before we can be preempted */
+ handle_exit_early(vcpu, ret);
+#ifdef CONFIG_CVM_HOST
+ }
+#endif
preempt_enable();
/*
@@ -986,8 +1082,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
vcpu->arch.target = -1;
ret = ARM_EXCEPTION_IL;
}
-
- ret = handle_exit(vcpu, ret);
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ ret = handle_cvm_exit(vcpu, ret);
+ else
+#endif
+ ret = handle_exit(vcpu, ret);
update_vcpu_stat_time(&vcpu->stat);
}
@@ -1419,6 +1519,11 @@ long kvm_arch_vm_ioctl(struct file *filp,
void __user *argp = (void __user *)arg;
switch (ioctl) {
+#ifdef CONFIG_CVM_HOST
+ case KVM_LOAD_USER_DATA: {
+ return kvm_load_user_data(kvm, arg);
+ }
+#endif
case KVM_CREATE_IRQCHIP: {
int ret;
if (!vgic_present)
@@ -1950,7 +2055,13 @@ int kvm_arch_init(void *opaque)
kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n");
return -ENODEV;
}
-
+#ifdef CONFIG_CVM_HOST
+ if (static_branch_unlikely(&kvm_cvm_is_enable) && in_hyp_mode) {
+ err = kvm_init_tmm();
+ if (err)
+ return err;
+ }
+#endif
if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
cpus_have_final_cap(ARM64_WORKAROUND_1508412))
kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
diff --git a/arch/arm64/kvm/cvm.c b/arch/arm64/kvm/cvm.c
new file mode 100644
index 000000000..11f82c07c
--- /dev/null
+++ b/arch/arm64/kvm/cvm.c
@@ -0,0 +1,824 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+#include <linux/kvm_host.h>
+#include <linux/kvm.h>
+#include <asm/kvm_tmi.h>
+#include <asm/kvm_pgtable.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_mmu.h>
+#include <asm/stage2_pgtable.h>
+#include <linux/arm-smccc.h>
+#include <kvm/arm_hypercalls.h>
+#include <kvm/arm_psci.h>
+
+/* Protects access to cvm_vmid_bitmap */
+static DEFINE_SPINLOCK(cvm_vmid_lock);
+static unsigned long *cvm_vmid_bitmap;
+DEFINE_STATIC_KEY_FALSE(kvm_cvm_is_available);
+DEFINE_STATIC_KEY_FALSE(kvm_cvm_is_enable);
+
+static int __init setup_cvm_host(char *str)
+{
+ int ret;
+ unsigned int val;
+
+ if (!str)
+ return 0;
+
+ ret = kstrtouint(str, 10, &val);
+ if (ret) {
+ pr_warn("Unable to parse cvm_host.\n");
+ } else {
+ if (val)
+ static_branch_enable(&kvm_cvm_is_enable);
+ }
+ return ret;
+}
+early_param("cvm_host", setup_cvm_host);
+
+u64 cvm_phys_to_phys(u64 phys)
+{
+ return phys;
+}
+
+u64 phys_to_cvm_phys(u64 phys)
+{
+ return phys;
+}
+
+static int cvm_vmid_init(void)
+{
+ unsigned int vmid_count = 1 << kvm_get_vmid_bits();
+
+ cvm_vmid_bitmap = bitmap_zalloc(vmid_count, GFP_KERNEL);
+ if (!cvm_vmid_bitmap) {
+ kvm_err("%s: Couldn't allocate cvm vmid bitmap\n", __func__);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static unsigned long tmm_feat_reg0;
+
+static bool tmm_supports(unsigned long feature)
+{
+ return !!u64_get_bits(tmm_feat_reg0, feature);
+}
+
+bool kvm_cvm_supports_sve(void)
+{
+ return tmm_supports(TMI_FEATURE_REGISTER_0_SVE_EN);
+}
+
+bool kvm_cvm_supports_pmu(void)
+{
+ return tmm_supports(TMI_FEATURE_REGISTER_0_PMU_EN);
+}
+
+u32 kvm_cvm_ipa_limit(void)
+{
+ return u64_get_bits(tmm_feat_reg0, TMI_FEATURE_REGISTER_0_S2SZ);
+}
+
+u32 kvm_cvm_get_num_brps(void)
+{
+ return u64_get_bits(tmm_feat_reg0, TMI_FEATURE_REGISTER_0_NUM_BPS);
+}
+
+u32 kvm_cvm_get_num_wrps(void)
+{
+ return u64_get_bits(tmm_feat_reg0, TMI_FEATURE_REGISTER_0_NUM_WPS);
+}
+
+static int cvm_vmid_reserve(void)
+{
+ int ret;
+ unsigned int vmid_count = 1 << kvm_get_vmid_bits();
+
+ spin_lock(&cvm_vmid_lock);
+ ret = bitmap_find_free_region(cvm_vmid_bitmap, vmid_count, 0);
+ spin_unlock(&cvm_vmid_lock);
+
+ return ret;
+}
+
+static void cvm_vmid_release(unsigned int vmid)
+{
+ spin_lock(&cvm_vmid_lock);
+ bitmap_release_region(cvm_vmid_bitmap, vmid, 0);
+ spin_unlock(&cvm_vmid_lock);
+}
+
+static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
+{
+ u64 shift = ARM64_HW_PGTABLE_LEVEL_SHIFT(pgt->start_level - 1);
+ u64 mask = BIT(pgt->ia_bits) - 1;
+
+ return (addr & mask) >> shift;
+}
+
+static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
+{
+ struct kvm_pgtable pgt = {
+ .ia_bits = ia_bits,
+ .start_level = start_level,
+ };
+ return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
+}
+
+int kvm_arm_create_cvm(struct kvm *kvm)
+{
+ int ret;
+ struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
+ unsigned int pgd_sz;
+
+ if (!kvm_is_cvm(kvm) || kvm_cvm_state(kvm) != CVM_STATE_NONE) {
+ return 0;
+ }
+
+ ret = cvm_vmid_reserve();
+ if (ret < 0) {
+ return ret;
+ }
+ kvm->arch.cvm.cvm_vmid = ret;
+
+ pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level);
+
+ kvm->arch.cvm.params->ttt_base = phys_to_cvm_phys(kvm->arch.mmu.pgd_phys);
+ kvm->arch.cvm.params->measurement_algo = 0;
+ kvm->arch.cvm.params->ttt_level_start = kvm->arch.mmu.pgt->start_level;
+ kvm->arch.cvm.params->ttt_num_start = pgd_sz;
+ kvm->arch.cvm.params->s2sz = VTCR_EL2_IPA(kvm->arch.vtcr);
+ kvm->arch.cvm.params->vmid = kvm->arch.cvm.cvm_vmid;
+ kvm->arch.cvm.params->ns_vtcr = kvm->arch.vtcr;
+ kvm->arch.cvm.params->vttbr_el2 = kvm->arch.mmu.pgd_phys;
+ ret = tmi_cvm_create(kvm->arch.cvm.rd, __pa(kvm->arch.cvm.params));
+ if (!ret) {
+ kvm_info("KVM creates cVM: %d\n", kvm->arch.cvm.cvm_vmid);
+ }
+
+ WRITE_ONCE(kvm->arch.cvm.state, CVM_STATE_NEW);
+ kfree(kvm->arch.cvm.params);
+ kvm->arch.cvm.params = NULL;
+ return ret;
+}
+
+int cvm_create_rd(struct kvm *kvm)
+{
+ if (!static_key_enabled(&kvm_cvm_is_available))
+ return -EFAULT;
+
+ kvm->arch.cvm.rd = tmi_mem_alloc(kvm->arch.cvm.rd, NO_NUMA, TMM_MEM_TYPE_RD, TMM_MEM_MAP_SIZE_MAX);
+ if (!kvm->arch.cvm.rd) {
+ kvm_err("tmi_mem_alloc for cvm rd failed: %d\n", kvm->arch.cvm.cvm_vmid);
+ return -ENOMEM;
+ }
+ kvm->arch.is_cvm = true;
+ return 0;
+}
+
+void kvm_free_rd(struct kvm *kvm)
+{
+ int ret;
+
+ if (!kvm->arch.cvm.rd)
+ return;
+
+ ret = tmi_mem_free(kvm->arch.cvm.rd, NO_NUMA, TMM_MEM_TYPE_RD, TMM_MEM_MAP_SIZE_MAX);
+ if (ret)
+ kvm_err("tmi_mem_free for cvm rd failed: %d\n", kvm->arch.cvm.cvm_vmid);
+ else
+ kvm->arch.cvm.rd = 0;
+}
+
+void kvm_destroy_cvm(struct kvm *kvm)
+{
+ uint32_t cvm_vmid = kvm->arch.cvm.cvm_vmid;
+
+ if (kvm->arch.cvm.params) {
+ kfree(kvm->arch.cvm.params);
+ kvm->arch.cvm.params = NULL;
+ }
+
+ if (kvm_cvm_state(kvm) == CVM_STATE_NONE)
+ return;
+
+ cvm_vmid_release(cvm_vmid);
+
+ WRITE_ONCE(kvm->arch.cvm.state, CVM_STATE_DYING);
+
+ if (!tmi_cvm_destroy(kvm->arch.cvm.rd)) {
+ kvm_info("KVM has destroyed cVM: %d\n", kvm->arch.cvm.cvm_vmid);
+ }
+
+ kvm_free_rd(kvm);
+}
+
+static int kvm_get_host_numa_node_by_ipa(uint64_t ipa, struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct kvm_numa_info *numa_info = &vcpu->kvm->arch.cvm.numa_info;
+ for (i = 0; i < numa_info->numa_cnt && i < MAX_NUMA_NODE; i++) {
+ struct kvm_numa_node *numa_node = &numa_info->numa_nodes[i];
+ if (ipa >= numa_node->ipa_start && ipa < (numa_node->ipa_start + numa_node->ipa_size)) {
+ return numa_node->host_numa_node;
+ }
+ }
+ return NO_NUMA;
+}
+
+static int kvm_cvm_ttt_create(struct cvm *cvm,
+ unsigned long addr,
+ int level,
+ phys_addr_t phys)
+{
+ addr = ALIGN_DOWN(addr, cvm_ttt_level_mapsize(level - 1));
+ return tmi_ttt_create(phys, cvm->rd, addr, level);
+}
+
+int kvm_cvm_create_ttt_levels(struct kvm *kvm, struct cvm *cvm,
+ unsigned long ipa,
+ int level,
+ int max_level,
+ struct kvm_mmu_memory_cache *mc)
+{
+ if (WARN_ON(level == max_level))
+ return 0;
+
+ while (level++ < max_level) {
+ phys_addr_t ttt;
+ ttt = tmi_mem_alloc(cvm->rd, NO_NUMA, TMM_MEM_TYPE_TTT, TMM_MEM_MAP_SIZE_MAX);
+ if (ttt == 0)
+ return -ENOMEM;
+
+ if (kvm_cvm_ttt_create(cvm, ipa, level, ttt)) {
+ (void)tmi_mem_free(ttt, NO_NUMA, TMM_MEM_TYPE_TTT, TMM_MEM_MAP_SIZE_MAX);
+ return -ENXIO;
+ }
+ }
+
+ return 0;
+}
+
+static int kvm_cvm_create_protected_data_page(struct kvm *kvm, struct cvm *cvm, unsigned long ipa, int level,
+ struct page *src_page, phys_addr_t dst_phys)
+{
+ phys_addr_t src_phys;
+ int ret;
+
+ src_phys = page_to_phys(src_page);
+ ret = tmi_data_create(dst_phys, cvm->rd, ipa, src_phys, level);
+ if (TMI_RETURN_STATUS(ret) == TMI_ERROR_TTT_WALK) {
+ /* Create missing RTTs and retry */
+ int level_fault = TMI_RETURN_INDEX(ret);
+ ret = kvm_cvm_create_ttt_levels(kvm, cvm, ipa, level_fault,
+ level, NULL);
+ if (ret)
+ goto err;
+ ret = tmi_data_create(dst_phys, cvm->rd, ipa, src_phys, level);
+ }
+ WARN_ON(ret);
+
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ return -ENXIO;
+}
+
+static u64 cvm_granule_size(u32 level)
+{
+ return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
+}
+
+int kvm_cvm_populate_par_region(struct kvm *kvm,
+ phys_addr_t ipa_base,
+ phys_addr_t ipa_end,
+ phys_addr_t dst_phys)
+{
+ struct cvm *cvm = &kvm->arch.cvm;
+ struct kvm_memory_slot *memslot;
+ gfn_t base_gfn, end_gfn;
+ int idx;
+ phys_addr_t ipa;
+ int ret = 0;
+ int level = TMM_TTT_LEVEL_3;
+ unsigned long map_size = cvm_granule_size(level);
+
+ base_gfn = gpa_to_gfn(ipa_base);
+ end_gfn = gpa_to_gfn(ipa_end);
+
+ idx = srcu_read_lock(&kvm->srcu);
+ memslot = gfn_to_memslot(kvm, base_gfn);
+ if (!memslot) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* We require the region to be contained within a single memslot */
+ if (memslot->base_gfn + memslot->npages < end_gfn) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mmap_read_lock(current->mm);
+
+ ipa = ipa_base;
+ while (ipa < ipa_end) {
+ struct page *page;
+ kvm_pfn_t pfn;
+
+ /*
+ * FIXME: This causes over mapping, but there's no good
+ * solution here with the ABI as it stands
+ */
+ ipa = ALIGN_DOWN(ipa, map_size);
+
+ pfn = gfn_to_pfn_memslot(memslot, gpa_to_gfn(ipa));
+
+ if (is_error_pfn(pfn)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ page = pfn_to_page(pfn);
+
+ ret = kvm_cvm_create_protected_data_page(kvm, cvm, ipa, level, page, dst_phys);
+ if (ret)
+ goto err_release_pfn;
+
+ ipa += map_size;
+ dst_phys += map_size;
+ kvm_release_pfn_dirty(pfn);
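+		/* Success path falls through; ret is 0, so the check below is a no-op */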
+err_release_pfn:
+ if (ret) {
+ kvm_release_pfn_clean(pfn);
+ break;
+ }
+ }
+
+ mmap_read_unlock(current->mm);
+out:
+ srcu_read_unlock(&kvm->srcu, idx);
+ return ret;
+}
+
+static int kvm_sel2_map_protected_ipa(struct kvm_vcpu *vcpu)
+{
+ int ret = 0;
+ gpa_t gpa, gpa_data_end, gpa_end, data_size;
+ u64 map_size, dst_phys;
+ u64 l2_granule = cvm_granule_size(2); /* 2MB */
+ u64 numa_id;
+ int cur_numa_id;
+
+	/* 2MB-align the addresses below */
+ gpa = vcpu->kvm->arch.cvm.loader_start;
+ gpa_end = vcpu->kvm->arch.cvm.loader_start + vcpu->kvm->arch.cvm.ram_size;
+ data_size = vcpu->kvm->arch.cvm.initrd_start - vcpu->kvm->arch.cvm.loader_start +
+ vcpu->kvm->arch.cvm.initrd_size;
+ data_size = round_up(data_size, l2_granule);
+ gpa_data_end = vcpu->kvm->arch.cvm.loader_start + data_size + l2_granule;
+ gpa = round_down(gpa, l2_granule);
+ gpa_end = round_up(gpa_end, l2_granule);
+ gpa_data_end = round_up(gpa_data_end, l2_granule);
+
+ /* get numa_id */
+ numa_id = kvm_get_host_numa_node_by_ipa(gpa, vcpu);
+ map_size = l2_granule;
+ do {
+ dst_phys = tmi_mem_alloc(vcpu->kvm->arch.cvm.rd, numa_id, TMM_MEM_TYPE_CVM_PA, map_size);
+ if (!dst_phys) {
+ ret = -ENOMEM;
+ kvm_err("[%s] call tmi_mem_alloc failed.\n", __func__);
+ goto out;
+ }
+
+		/*
+		 * Call tmi_data_create() to copy the kernel data and to map
+		 * all of the GPAs needed for system boot. Copying only
+		 * data_size is not enough to boot the kernel, so copy and
+		 * map another 2MB here.
+		 */
+ ret = kvm_cvm_populate_par_region(vcpu->kvm, gpa, gpa + map_size, dst_phys);
+ if (ret) {
+ ret = -EFAULT;
+ kvm_err("kvm_cvm_populate_par_region fail:%d.\n", ret);
+ goto out;
+ }
+ gpa += map_size;
+ dst_phys += map_size;
+ } while (gpa < gpa_data_end);
+
+ cur_numa_id = numa_node_id();
+ if (cur_numa_id < 0) {
+ ret = -EFAULT;
+ kvm_err("get current numa node fail\n");
+ goto out;
+ }
+
+	/*
+	 * Map the GPA range to secure memory without copying data from the
+	 * host. The cVM GPA mappings are freed when the cVM is destroyed.
+	 */
+ ret = tmi_ttt_map_range(vcpu->kvm->arch.cvm.rd, gpa_data_end,
+ gpa_end - gpa_data_end, cur_numa_id, numa_id);
+ if (ret)
+ kvm_err("tmi_ttt_map_range fail:%d.\n", ret);
+out:
+ return ret;
+}
+
+int kvm_create_tec(struct kvm_vcpu *vcpu)
+{
+ int ret;
+ int i;
+ struct tmi_tec_params *params_ptr;
+ struct user_pt_regs *vcpu_regs = vcpu_gp_regs(vcpu);
+ uint64_t mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+
+ params_ptr = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
+ if (!params_ptr) {
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < TEC_CREATE_NR_GPRS; ++i) {
+ params_ptr->gprs[i] = vcpu_regs->regs[i];
+ }
+
+ params_ptr->pc = vcpu_regs->pc;
+
+ if (vcpu->vcpu_id == 0) {
+ params_ptr->flags = TMI_RUNNABLE;
+ } else {
+ params_ptr->flags = TMI_NOT_RUNNABLE;
+ }
+ params_ptr->ram_size = vcpu->kvm->arch.cvm.ram_size;
+ ret = tmi_tec_create(vcpu->arch.tec.tec, vcpu->kvm->arch.cvm.rd, mpidr, __pa(params_ptr));
+
+ kfree(params_ptr);
+
+ return ret;
+}
+
+static int kvm_create_all_tecs(struct kvm *kvm)
+{
+ int ret = 0;
+ struct kvm_vcpu *vcpu;
+ unsigned long i;
+
+ if (READ_ONCE(kvm->arch.cvm.state) == CVM_STATE_ACTIVE) {
+ return -1;
+ }
+
+ mutex_lock(&kvm->lock);
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!vcpu->arch.tec.tec_created) {
+ ret = kvm_create_tec(vcpu);
+ if (ret) {
+ mutex_unlock(&kvm->lock);
+ return ret;
+ }
+ vcpu->arch.tec.tec_created = true;
+ }
+ }
+ mutex_unlock(&kvm->lock);
+ return ret;
+}
+
+static int config_cvm_sve(struct kvm *kvm, struct kvm_cap_arm_tmm_config_item *cfg)
+{
+ tmi_cvm_params_t *params = kvm->arch.cvm.params;
+
+ int max_sve_vq = u64_get_bits(tmm_feat_reg0,
+ TMI_FEATURE_REGISTER_0_SVE_VL);
+
+ if (!kvm_cvm_supports_sve())
+ return -EINVAL;
+
+ if (cfg->sve_vq > max_sve_vq)
+ return -EINVAL;
+
+ params->sve_vl = cfg->sve_vq;
+ params->flags |= TMI_CVM_PARAM_FLAG_SVE;
+
+ return 0;
+}
+
+static int config_cvm_pmu(struct kvm *kvm, struct kvm_cap_arm_tmm_config_item *cfg)
+{
+ tmi_cvm_params_t *params = kvm->arch.cvm.params;
+
+ int max_pmu_num_ctrs = u64_get_bits(tmm_feat_reg0,
+ TMI_FEATURE_REGISTER_0_PMU_NUM_CTRS);
+
+ if (!kvm_cvm_supports_pmu())
+ return -EINVAL;
+
+ if (cfg->num_pmu_cntrs > max_pmu_num_ctrs)
+ return -EINVAL;
+
+ params->pmu_num_cnts = cfg->num_pmu_cntrs;
+ params->flags |= TMI_CVM_PARAM_FLAG_PMU;
+
+ return 0;
+}
+
+static int kvm_tmm_config_cvm(struct kvm *kvm, struct kvm_enable_cap *cap)
+{
+ struct kvm_cap_arm_tmm_config_item cfg;
+ int r = 0;
+
+ if (kvm_cvm_state(kvm) != CVM_STATE_NONE)
+ return -EBUSY;
+
+ if (copy_from_user(&cfg, (void __user *)cap->args[1], sizeof(cfg))) {
+ return -EFAULT;
+ }
+
+ switch (cfg.cfg) {
+ case KVM_CAP_ARM_TMM_CFG_SVE:
+ r = config_cvm_sve(kvm, &cfg);
+ break;
+ case KVM_CAP_ARM_TMM_CFG_PMU:
+ r = config_cvm_pmu(kvm, &cfg);
+ break;
+ default:
+ r = -EINVAL;
+ }
+
+ return r;
+}
+
+int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
+{
+ int r = 0;
+
+ mutex_lock(&kvm->lock);
+ switch (cap->args[0]) {
+ case KVM_CAP_ARM_TMM_CONFIG_CVM_HOST:
+ r = kvm_tmm_config_cvm(kvm, cap);
+ break;
+ case KVM_CAP_ARM_TMM_CREATE_CVM:
+ r = kvm_arm_create_cvm(kvm);
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+ mutex_unlock(&kvm->lock);
+
+ return r;
+}
+
+void kvm_destroy_tec(struct kvm_vcpu *vcpu)
+{
+ int ret = 0;
+ if (!vcpu_is_tec(vcpu)) {
+ return;
+ }
+
+ if (tmi_tec_destroy(vcpu->arch.tec.tec) != 0) {
+ kvm_err("%s vcpu id : %d failed!\n", __func__, vcpu->vcpu_id);
+ }
+
+ ret = tmi_mem_free(vcpu->arch.tec.tec, NO_NUMA, TMM_MEM_TYPE_TEC, TMM_MEM_MAP_SIZE_MAX);
+ if (ret != 0) {
+ kvm_err("tmi_mem_free for cvm tec failed\n");
+ }
+ vcpu->arch.tec.tec = 0;
+ kfree(vcpu->arch.tec.tec_run);
+}
+
+static int tmi_check_version(void)
+{
+ uint64_t res;
+ int version_major;
+ int version_minor;
+
+ res = tmi_version();
+ if (res == SMCCC_RET_NOT_SUPPORTED) {
+ return -ENXIO;
+ }
+
+ version_major = TMI_ABI_VERSION_GET_MAJOR(res);
+ version_minor = TMI_ABI_VERSION_GET_MINOR(res);
+
+ if (version_major != TMI_ABI_VERSION_MAJOR) {
+ kvm_err("Unsupported TMI_ABI (version %d %d)\n", version_major,
+ version_minor);
+ return -ENXIO;
+ }
+
+ kvm_info("TMI ABI version %d,%d\n", version_major, version_minor);
+ return 0;
+}
+
+static int kvm_kick_boot_vcpu(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ unsigned long i;
+
+ if (READ_ONCE(kvm->arch.cvm.state) == CVM_STATE_ACTIVE) {
+ return 0;
+ }
+
+ mutex_lock(&kvm->lock);
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (i == 0) {
+ kvm_vcpu_kick(vcpu);
+ }
+ }
+ mutex_unlock(&kvm->lock);
+ return 0;
+}
+
+int kvm_arm_cvm_first_run(struct kvm_vcpu *vcpu)
+{
+ int ret = 0;
+
+ if (READ_ONCE(vcpu->kvm->arch.cvm.state) == CVM_STATE_ACTIVE) {
+ return ret;
+ }
+
+ if (vcpu->vcpu_id == 0) {
+ ret = kvm_create_all_tecs(vcpu->kvm);
+ if (ret != 0) {
+ return ret;
+ }
+ } else {
+ kvm_kick_boot_vcpu(vcpu->kvm);
+ }
+
+ mutex_lock(&vcpu->kvm->lock);
+
+ if (vcpu->vcpu_id == 0) {
+ ret = kvm_sel2_map_protected_ipa(vcpu);
+ if (ret) {
+ kvm_err("Map protected ipa failed!\n");
+ goto unlock_exit;
+ }
+ ret = tmi_cvm_activate(vcpu->kvm->arch.cvm.rd);
+ if (ret) {
+ kvm_err("tmi_cvm_activate failed!\n");
+ goto unlock_exit;
+ }
+
+ WRITE_ONCE(vcpu->kvm->arch.cvm.state, CVM_STATE_ACTIVE);
+ kvm_info("cVM%d is activated!\n", vcpu->kvm->arch.cvm.cvm_vmid);
+ }
+unlock_exit:
+ mutex_unlock(&vcpu->kvm->lock);
+
+ return ret;
+}
+
+int kvm_tec_enter(struct kvm_vcpu *vcpu)
+{
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+ if (READ_ONCE(vcpu->kvm->arch.cvm.state) != CVM_STATE_ACTIVE) {
+ return -EINVAL;
+ }
+
+ /* set/clear TWI TWE flags */
+ if (vcpu->arch.hcr_el2 & HCR_TWI) {
+ run->tec_entry.flags |= TEC_ENTRY_FLAG_TRAP_WFI;
+ } else {
+ run->tec_entry.flags &= ~TEC_ENTRY_FLAG_TRAP_WFI;
+ }
+
+ if (vcpu->arch.hcr_el2 & HCR_TWE) {
+ run->tec_entry.flags |= TEC_ENTRY_FLAG_TRAP_WFE;
+ } else {
+ run->tec_entry.flags &= ~TEC_ENTRY_FLAG_TRAP_WFE;
+ }
+
+ return tmi_tec_enter(vcpu->arch.tec.tec, __pa(run));
+}
+
+int cvm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target)
+{
+ int ret;
+ ret = tmi_psci_complete(calling->arch.tec.tec, target->arch.tec.tec);
+ if (ret) {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+#define SIMD_PAGE_SIZE (3 * PAGE_SIZE)
+int kvm_arch_tec_init(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.tec.tec_run = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
+ if (!vcpu->arch.tec.tec_run) {
+ return -ENOMEM;
+ }
+
+ vcpu->arch.tec.tec = tmi_mem_alloc(vcpu->kvm->arch.cvm.rd, NO_NUMA, TMM_MEM_TYPE_TEC, TMM_MEM_MAP_SIZE_MAX);
+ if (vcpu->arch.tec.tec == 0) {
+ kvm_info("KVM tmi_mem_alloc failed:%d\n", vcpu->vcpu_id);
+ return -ENOMEM;
+ }
+ kvm_info("KVM inits cVM VCPU:%d\n", vcpu->vcpu_id);
+
+ return 0;
+}
+
+int kvm_init_tmm(void)
+{
+ int ret;
+
+ if (PAGE_SIZE != SZ_4K) {
+ return 0;
+ }
+
+ if (tmi_check_version()) {
+ return 0;
+ }
+
+ ret = cvm_vmid_init();
+ if (ret) {
+ return ret;
+ }
+
+ tmm_feat_reg0 = tmi_features(0);
+ kvm_info("TMM feature0: 0x%lx\n", tmm_feat_reg0);
+
+ static_branch_enable(&kvm_cvm_is_available);
+
+ return 0;
+}
+
+int kvm_init_cvm_vm(struct kvm *kvm)
+{
+ struct tmi_cvm_params *params;
+
+ params = (struct tmi_cvm_params *)kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
+ if (!params) {
+ return -ENOMEM;
+ }
+
+ kvm->arch.cvm.params = params;
+
+ return 0;
+}
+
+int kvm_load_user_data(struct kvm *kvm, unsigned long arg)
+{
+ struct kvm_user_data user_data;
+ void __user *argp = (void __user *)arg;
+
+ if (!kvm_is_cvm(kvm))
+ return -EFAULT;
+
+ if (copy_from_user(&user_data, argp, sizeof(user_data)))
+ return -EFAULT;
+
+ kvm->arch.cvm.loader_start = user_data.loader_start;
+ kvm->arch.cvm.initrd_start = user_data.initrd_start;
+ kvm->arch.cvm.initrd_size = user_data.initrd_size;
+ kvm->arch.cvm.ram_size = user_data.ram_size;
+ memcpy(&kvm->arch.cvm.numa_info, &user_data.numa_info, sizeof(struct kvm_numa_info));
+
+ return 0;
+}
+
+void kvm_cvm_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ kvm_timer_vcpu_put(vcpu);
+ kvm_vgic_put(vcpu);
+ vcpu->cpu = -1;
+}
+unsigned long cvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu,
+ unsigned long target_affinity, unsigned long lowest_affinity_level)
+{
+ struct kvm_vcpu *target_vcpu;
+
+ if (lowest_affinity_level != 0)
+ return PSCI_RET_INVALID_PARAMS;
+
+ target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, target_affinity);
+ if (!target_vcpu)
+ return PSCI_RET_INVALID_PARAMS;
+
+ cvm_psci_complete(vcpu, target_vcpu);
+ return PSCI_RET_SUCCESS;
+}
+
+int kvm_cvm_vcpu_set_events(struct kvm_vcpu *vcpu,
+ bool serror_pending, bool ext_dabt_pending)
+{
+ if (serror_pending)
+ return -EINVAL;
+
+ if (ext_dabt_pending) {
+ if (!(((struct tmi_tec_run *)vcpu->arch.tec.tec_run)->tec_entry.flags &
+ TEC_ENTRY_FLAG_EMUL_MMIO))
+ return -EINVAL;
+
+ ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)->tec_entry.flags
+ &= ~TEC_ENTRY_FLAG_EMUL_MMIO;
+ ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)->tec_entry.flags
+ |= TEC_ENTRY_FLAG_INJECT_SEA;
+ }
+ return 0;
+}
diff --git a/arch/arm64/kvm/cvm_exit.c b/arch/arm64/kvm/cvm_exit.c
new file mode 100644
index 000000000..ba07a23be
--- /dev/null
+++ b/arch/arm64/kvm/cvm_exit.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+#include <linux/kvm_host.h>
+#include <kvm/arm_hypercalls.h>
+#include <kvm/arm_psci.h>
+
+#include <asm/kvm_tmi.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_coproc.h>
+
+typedef int (*exit_handler_fn)(struct kvm_vcpu *vcpu);
+
+static void update_arch_timer_irq_lines(struct kvm_vcpu *vcpu, bool unmask_ctl)
+{
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+ __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = run->tec_exit.cntv_ctl;
+ __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = run->tec_exit.cntv_cval;
+ __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = run->tec_exit.cntp_ctl;
+ __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = run->tec_exit.cntp_cval;
+
+	/*
+	 * Because the timer mask is tainted by the TMM, we don't know the
+	 * true intent of the guest. Here, we assume the mask is always
+	 * cleared during WFI.
+	 */
+ if (unmask_ctl) {
+ __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) &= ~ARCH_TIMER_CTRL_IT_MASK;
+ __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) &= ~ARCH_TIMER_CTRL_IT_MASK;
+ }
+
+ kvm_cvm_timers_update(vcpu);
+}
+
+static int tec_exit_reason_notimpl(struct kvm_vcpu *vcpu)
+{
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+ pr_err("[vcpu %d] Unhandled exit reason from cvm (ESR: %#llx)\n",
+ vcpu->vcpu_id, run->tec_exit.esr);
+ return -ENXIO;
+}
+
+/*
+ * The logic here is the same as kvm_handle_wfx, except for the tracing
+ * and the PC update; we duplicate it to avoid changing kvm_handle_wfx.
+ */
+static int tec_exit_wfx(struct kvm_vcpu *vcpu)
+{
+ u64 esr = kvm_vcpu_get_esr(vcpu);
+
+ if (esr & ESR_ELx_WFx_ISS_WFE) {
+ vcpu->stat.wfe_exit_stat++;
+ } else {
+ vcpu->stat.wfi_exit_stat++;
+ }
+
+ if (esr & ESR_ELx_WFx_ISS_WFxT) {
+ if (esr & ESR_ELx_WFx_ISS_RV) {
+ u64 val, now;
+
+ now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT);
+ val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
+
+ if (now >= val)
+ goto out;
+ } else {
+ /* Treat WFxT as WFx if RN is invalid */
+ esr &= ~ESR_ELx_WFx_ISS_WFxT;
+ }
+ }
+
+ if (esr & ESR_ELx_WFx_ISS_WFE) {
+ kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
+ } else {
+ vcpu->arch.pvsched.pv_unhalted = false;
+ if (esr & ESR_ELx_WFx_ISS_WFxT)
+ vcpu->arch.flags |= KVM_ARM64_WFIT;
+ kvm_vcpu_block(vcpu);
+ vcpu->arch.flags &= ~KVM_ARM64_WFIT;
+ kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+ }
+
+out:
+ return 1;
+}
+
+static int tec_exit_sys_reg(struct kvm_vcpu *vcpu)
+{
+ int ret;
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+ unsigned long esr = kvm_vcpu_get_esr(vcpu);
+ int rt = kvm_vcpu_sys_get_rt(vcpu);
+ bool is_write = !(esr & 1);
+
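+	/* For a write trap, the value the guest wrote arrives in exit gprs[0] */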
+ if (is_write) {
+ vcpu_set_reg(vcpu, rt, run->tec_exit.gprs[0]);
+ }
+
+ ret = kvm_handle_sys_reg(vcpu);
+
+ if (ret >= 0 && !is_write) {
+ run->tec_entry.gprs[0] = vcpu_get_reg(vcpu, rt);
+ }
+ return ret;
+}
+
+static int tec_exit_sync_dabt(struct kvm_vcpu *vcpu)
+{
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+ if (kvm_vcpu_dabt_iswrite(vcpu) && kvm_vcpu_dabt_isvalid(vcpu)) {
+ vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu),
+ run->tec_exit.gprs[0]);
+ }
+ return kvm_handle_guest_abort(vcpu);
+}
+
+static int tec_exit_sync_iabt(struct kvm_vcpu *vcpu)
+{
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+ pr_err("[vcpu %d] Unhandled instruction abort (ESR: %#llx).\n",
+ vcpu->vcpu_id, run->tec_exit.esr);
+
+ return -ENXIO;
+}
+
+static exit_handler_fn tec_exit_handlers[] = {
+ [0 ... ESR_ELx_EC_MAX] = tec_exit_reason_notimpl,
+ [ESR_ELx_EC_WFx] = tec_exit_wfx,
+ [ESR_ELx_EC_SYS64] = tec_exit_sys_reg,
+ [ESR_ELx_EC_DABT_LOW] = tec_exit_sync_dabt,
+ [ESR_ELx_EC_IABT_LOW] = tec_exit_sync_iabt
+};
+
+static int tec_exit_psci(struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+ for (i = 0; i < TEC_EXIT_NR_GPRS; ++i) {
+ vcpu_set_reg(vcpu, i, run->tec_exit.gprs[i]);
+ }
+
+ return kvm_psci_call(vcpu);
+}
+
+static int tec_exit_host_call(struct kvm_vcpu *vcpu)
+{
+ int ret, i;
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+ vcpu->stat.hvc_exit_stat++;
+
+ for (i = 0; i < TEC_EXIT_NR_GPRS; ++i) {
+ vcpu_set_reg(vcpu, i, run->tec_exit.gprs[i]);
+ }
+
+ ret = kvm_hvc_call_handler(vcpu);
+
+ if (ret < 0) {
+ vcpu_set_reg(vcpu, 0, ~0UL);
+ ret = 1;
+ }
+ for (i = 0; i < TEC_EXIT_NR_GPRS; ++i) {
+ run->tec_entry.gprs[i] = vcpu_get_reg(vcpu, i);
+ }
+
+ return ret;
+}
+
+/*
+ * Return > 0 to return to the guest, < 0 on error, and 0 (with
+ * exit_reason set) on a proper exit to userspace.
+ */
+int handle_cvm_exit(struct kvm_vcpu *vcpu, int tec_run_ret)
+{
+ unsigned long status;
+ struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+ u8 esr_ec = ESR_ELx_EC(run->tec_exit.esr);
+ bool is_wfx;
+
+ status = TMI_RETURN_STATUS(tec_run_ret);
+
+ if (status == TMI_ERROR_CVM_POWEROFF) {
+ vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+ vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SHUTDOWN;
+ return 0;
+ }
+
+ if (status == TMI_ERROR_CVM_STATE) {
+ vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
+ return 0;
+ }
+
+ if (tec_run_ret) {
+ return -ENXIO;
+ }
+
+ vcpu->arch.fault.esr_el2 = run->tec_exit.esr;
+ vcpu->arch.fault.far_el2 = run->tec_exit.far;
+ vcpu->arch.fault.hpfar_el2 = run->tec_exit.hpfar;
+
+ is_wfx = (run->tec_exit.exit_reason == TMI_EXIT_SYNC) && (esr_ec == ESR_ELx_EC_WFx);
+ update_arch_timer_irq_lines(vcpu, is_wfx);
+
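+	/* Reset entry flags; they are repopulated before the next TEC entry */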
+ run->tec_entry.flags = 0;
+
+ switch (run->tec_exit.exit_reason) {
+ case TMI_EXIT_FIQ:
+ case TMI_EXIT_IRQ:
+ return 1;
+ case TMI_EXIT_PSCI:
+ return tec_exit_psci(vcpu);
+ case TMI_EXIT_SYNC:
+ return tec_exit_handlers[esr_ec](vcpu);
+ case TMI_EXIT_HOST_CALL:
+ return tec_exit_host_call(vcpu);
+ }
+
+ kvm_pr_unimpl("Unsupported exit reason : 0x%llx\n",
+ run->tec_exit.exit_reason);
+ return 0;
+}
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index f9c3dbc99..ecdd35527 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -27,6 +27,10 @@
#include <asm/kvm_coproc.h>
#include <asm/sigcontext.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
+
#include "trace.h"
struct kvm_stats_debugfs_item debugfs_entries[] = {
@@ -818,6 +822,10 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
bool has_esr = events->exception.serror_has_esr;
bool ext_dabt_pending = events->exception.ext_dabt_pending;
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ return kvm_cvm_vcpu_set_events(vcpu, serror_pending, ext_dabt_pending);
+#endif
if (serror_pending && has_esr) {
if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
return -EINVAL;
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 452f4cacd..54d541767 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -229,6 +229,25 @@ void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
}
}
+#ifdef CONFIG_CVM_HOST
+void __vgic_v3_restore_tec_state(struct vgic_v3_cpu_if *cpu_if,
+ u64 *entry_hcr,
+ u64 *entry_lrs)
+{
+ u64 used_lrs = cpu_if->used_lrs;
+ int i;
+
+ *entry_hcr = cpu_if->vgic_hcr;
+ for (i = 0; i < kvm_vgic_global_state.nr_lr; ++i) {
+ if (i < used_lrs) {
+ entry_lrs[i] = cpu_if->vgic_lr[i];
+ } else {
+ entry_lrs[i] = 0;
+ }
+ }
+}
+#endif
+
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if)
{
u64 used_lrs = cpu_if->used_lrs;
diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c
index 6a2826f1b..6fc49784b 100644
--- a/arch/arm64/kvm/mmio.c
+++ b/arch/arm64/kvm/mmio.c
@@ -8,6 +8,10 @@
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
+
#include "trace.h"
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
@@ -109,6 +113,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
&data);
data = vcpu_data_host_to_guest(vcpu, data, len);
vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)->
+ tec_entry.gprs[0] = data;
+ }
+#endif
}
/*
@@ -177,7 +187,12 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
run->mmio.phys_addr = fault_ipa;
run->mmio.len = len;
vcpu->mmio_needed = 1;
-
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)->tec_entry.flags |=
+ TEC_ENTRY_FLAG_EMUL_MMIO;
+ }
+#endif
if (!ret) {
/* We handled the access successfully in the kernel. */
if (!is_write)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 6fa92a143..30426f6ad 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -474,6 +474,7 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
spin_lock(&kvm->mmu_lock);
pgt = mmu->pgt;
+
if (pgt) {
mmu->pgd_phys = 0;
mmu->pgt = NULL;
@@ -790,6 +791,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level);
write_fault = kvm_is_write_fault(vcpu);
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ write_fault = true;
+ prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;
+ }
+#endif
exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
VM_BUG_ON(write_fault && exec_fault);
vcpu->stat.mabt_exit_stat++;
diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
index 32bb26be8..0160ee8d6 100644
--- a/arch/arm64/kvm/psci.c
+++ b/arch/arm64/kvm/psci.c
@@ -16,6 +16,9 @@
#include <kvm/arm_psci.h>
#include <kvm/arm_hypercalls.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
/*
* This is an implementation of the Power State Coordination Interface
* as described in ARM document number ARM DEN 0022A.
@@ -78,6 +81,10 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
*/
if (!vcpu)
return PSCI_RET_INVALID_PARAMS;
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ cvm_psci_complete(source_vcpu, vcpu);
+#endif
if (!vcpu->arch.power_off) {
if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
return PSCI_RET_ALREADY_ON;
@@ -133,7 +140,10 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
/* Ignore other bits of target affinity */
target_affinity &= target_affinity_mask;
-
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ return cvm_psci_vcpu_affinity_info(vcpu, target_affinity, lowest_affinity_level);
+#endif
/*
* If one or more VCPU matching target affinity are running
* then ON else OFF
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 51f4c5e85..bb177d58c 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -30,6 +30,9 @@
#include <asm/kvm_mmu.h>
#include <asm/virt.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
/* Maximum phys_shift supported for any VM on this host */
static u32 kvm_ipa_limit;
@@ -199,6 +202,9 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
kfree(vcpu->arch.sve_state);
+#ifdef CONFIG_CVM_HOST
+ kvm_destroy_tec(vcpu);
+#endif
}
static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
@@ -433,7 +439,11 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
u32 parange, phys_shift;
u8 lvls, pbha = 0xf;
+#ifdef CONFIG_CVM_HOST
+ if ((type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK) && (!kvm_is_cvm(kvm)))
+#else
if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
+#endif
return -EINVAL;
phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
diff --git a/arch/arm64/kvm/tmi.c b/arch/arm64/kvm/tmi.c
new file mode 100644
index 000000000..6eb5dbd97
--- /dev/null
+++ b/arch/arm64/kvm/tmi.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+#include <linux/arm-smccc.h>
+#include <asm/kvm_tmi.h>
+
+u64 tmi_version(void)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_VESION, &res);
+ return res.a1;
+}
+
+u64 tmi_data_create(u64 data, u64 rd, u64 map_addr, u64 src, u64 level)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_DATA_CREATE, data, rd, map_addr, src, level, &res);
+ return res.a1;
+}
+
+u64 tmi_data_destroy(u64 rd, u64 map_addr, u64 level)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_DATA_DESTROY, rd, map_addr, level, &res);
+ return res.a1;
+}
+
+u64 tmi_cvm_activate(u64 rd)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_CVM_ACTIVATE, rd, &res);
+ return res.a1;
+}
+
+u64 tmi_cvm_create(u64 rd, u64 params_ptr)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_CVM_CREATE, rd, params_ptr, &res);
+ return res.a1;
+}
+
+u64 tmi_cvm_destroy(u64 rd)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_CVM_DESTROY, rd, &res);
+ return res.a1;
+}
+
+u64 tmi_tec_create(u64 tec, u64 rd, u64 mpidr, u64 params_ptr)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TEC_CREATE, tec, rd, mpidr, params_ptr, &res);
+ return res.a1;
+}
+
+u64 tmi_tec_destroy(u64 tec)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TEC_DESTROY, tec, &res);
+ return res.a1;
+}
+
+u64 tmi_tec_enter(u64 tec, u64 run_ptr)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TEC_ENTER, tec, run_ptr, &res);
+ return res.a1;
+}
+
+u64 tmi_ttt_create(u64 ttt, u64 rd, u64 map_addr, u64 level)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TTT_CREATE, ttt, rd, map_addr, level, &res);
+ return res.a1;
+}
+
+u64 tmi_ttt_destroy(u64 ttt, u64 rd, u64 map_addr, u64 level)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TTT_DESTROY, ttt, rd, map_addr, level, &res);
+ return res.a1;
+}
+
+u64 tmi_ttt_map_unprotected(u64 rd, u64 map_addr, u64 level, u64 ttte)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TTT_MAP_UNPROTECTED, rd, map_addr, level, ttte, &res);
+ return res.a1;
+}
+
+u64 tmi_ttt_unmap_unprotected(u64 rd, u64 map_addr, u64 level, u64 ns)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TTT_UNMAP_UNPROTECTED, rd, map_addr, level, ns, &res);
+ return res.a1;
+}
+
+u64 tmi_ttt_unmap_protected(u64 rd, u64 map_addr, u64 level)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TTT_UNMAP_PROTECTED, rd, map_addr, level, &res);
+ return res.a1;
+}
+
+u64 tmi_psci_complete(u64 calling_tec, u64 target_tec)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_PSCI_COMPLETE, calling_tec, target_tec, &res);
+ return res.a1;
+}
+
+u64 tmi_features(u64 index)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_FEATURES, index, &res);
+ return res.a1;
+}
+
+u64 tmi_mem_alloc(u64 rd, u64 numa_id, enum tmi_tmm_mem_type tmm_mem_type,
+ enum tmi_tmm_map_size tmm_map_size)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_MEM_ALLOC, rd, numa_id, tmm_mem_type, tmm_map_size, &res);
+ return res.a1;
+}
+
+u64 tmi_mem_free(u64 pa, u64 numa_id, enum tmi_tmm_mem_type tmm_mem_type,
+ enum tmi_tmm_map_size tmm_map_size)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_MEM_FREE, pa, numa_id, tmm_mem_type, tmm_map_size, &res);
+ return res.a1;
+}
+
+u64 tmi_ttt_map_range(u64 rd, u64 map_addr, u64 size, u64 cur_node, u64 target_node)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TTT_MAP_RANGE, rd, map_addr, size, cur_node, target_node, &res);
+ return res.a1;
+}
+
+u64 tmi_ttt_unmap_range(u64 rd, u64 map_addr, u64 size, u64 node_id)
+{
+ struct arm_smccc_res res;
+ arm_smccc_1_1_smc(TMI_TMM_TTT_UNMAP_RANGE, rd, map_addr, size, node_id, &res);
+ return res.a1;
+}
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 213afce81..c80730152 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -10,6 +10,10 @@
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
+
#include "vgic.h"
static bool group0_trap;
@@ -674,7 +678,10 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
kvm_vgic_global_state.vcpu_base = 0;
} else {
kvm_vgic_global_state.vcpu_base = info->vcpu.start;
- kvm_vgic_global_state.can_emulate_gicv2 = true;
+#ifdef CONFIG_CVM_HOST
+ if (!static_branch_unlikely(&kvm_cvm_is_available))
+#endif
+ kvm_vgic_global_state.can_emulate_gicv2 = true;
ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
if (ret) {
kvm_err("Cannot register GICv2 KVM device.\n");
@@ -735,7 +742,12 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
-
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+ cpu_if->vgic_vmcr = ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)->tec_exit.gicv3_vmcr;
+ return;
+ }
+#endif
if (likely(cpu_if->vgic_sre))
cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
}
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index 116aa91d5..ef9ffea7a 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -12,6 +12,10 @@
#include <asm/kvm_hyp.h>
+#ifdef CONFIG_CVM_HOST
+#include <asm/kvm_tmi.h>
+#endif
+
#include "vgic.h"
#define CREATE_TRACE_POINTS
@@ -872,11 +876,42 @@ static inline bool can_access_vgic_from_kernel(void)
return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
}
+#ifdef CONFIG_CVM_HOST
+static inline void vgic_tmm_save_state(struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+ struct tmi_tec_run *tec_run = vcpu->arch.tec.tec_run;
+
+ for (i = 0; i < kvm_vgic_global_state.nr_lr; ++i) {
+ cpu_if->vgic_lr[i] = tec_run->tec_exit.gicv3_lrs[i];
+ tec_run->tec_entry.gicv3_lrs[i] = 0;
+ }
+}
+
+static inline void vgic_tmm_restore_state(struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+ struct tmi_tec_run *tec_run = vcpu->arch.tec.tec_run;
+
+ for (i = 0; i < kvm_vgic_global_state.nr_lr; ++i) {
+ tec_run->tec_entry.gicv3_lrs[i] = cpu_if->vgic_lr[i];
+ tec_run->tec_exit.gicv3_lrs[i] = cpu_if->vgic_lr[i];
+ }
+}
+#endif
+
static inline void vgic_save_state(struct kvm_vcpu *vcpu)
{
if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
vgic_v2_save_state(vcpu);
else
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ vgic_tmm_save_state(vcpu);
+ else
+#endif
__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
}
@@ -907,6 +942,13 @@ static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
vgic_v2_restore_state(vcpu);
else
+#ifdef CONFIG_CVM_HOST
+	if (vcpu_is_tec(vcpu))
+		vgic_tmm_restore_state(vcpu);
+	else
+#endif
__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
}
@@ -948,7 +990,10 @@ void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
if (unlikely(!vgic_initialized(vcpu->kvm)))
return;
-
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ return;
+#endif
if (kvm_vgic_global_state.type == VGIC_V2)
vgic_v2_load(vcpu);
else
@@ -959,7 +1004,10 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
if (unlikely(!vgic_initialized(vcpu->kvm)))
return;
-
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ return;
+#endif
if (kvm_vgic_global_state.type == VGIC_V2)
vgic_v2_put(vcpu);
else
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 413d6f9bc..18ccd16fc 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -117,4 +117,8 @@ void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
u32 timer_get_ctl(struct arch_timer_context *ctxt);
u64 timer_get_cval(struct arch_timer_context *ctxt);
+#ifdef CONFIG_CVM_HOST
+/* Needed for S-EL2 */
+void kvm_cvm_timers_update(struct kvm_vcpu *vcpu);
+#endif
#endif
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 595c9da4f..1cb861d6c 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -345,6 +345,27 @@ struct kvm_vcpu {
struct kvm_vcpu_arch arch;
};
+#ifdef CONFIG_CVM_HOST
+#define KVM_TYPE_CVM_BIT 8
+#define CVM_MAX_HALT_POLL_NS 100000
+
+DECLARE_STATIC_KEY_FALSE(kvm_cvm_is_available);
+
+static __always_inline bool vcpu_is_tec(struct kvm_vcpu *vcpu)
+{
+ if (static_branch_unlikely(&kvm_cvm_is_available)) {
+ return vcpu->arch.tec.tec_run;
+ }
+ return false;
+}
+
+static inline bool kvm_arm_cvm_type(unsigned long type)
+{
+ return type & (1UL << KVM_TYPE_CVM_BIT);
+}
+
+#endif
+
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
/*
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index a1d8b1184..3332ee9ed 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1373,6 +1373,35 @@ struct kvm_master_dev_info {
#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
#define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
+#ifdef CONFIG_CVM_HOST
+#define KVM_LOAD_USER_DATA _IOW(KVMIO, 0x49, struct kvm_user_data)
+
+#define KVM_CAP_ARM_TMM 300 /* FIXME: Large number to prevent conflicts */
+#define MAX_NUMA_NODE 8
+#define MAX_CPU_BIT_MAP 0
+
+struct kvm_numa_node {
+ __u64 numa_id;
+ __u64 ipa_start;
+ __u64 ipa_size;
+	__s64 host_numa_node;
+ __u64 cpu_id[MAX_CPU_BIT_MAP];
+};
+
+struct kvm_numa_info {
+ __u64 numa_cnt;
+ struct kvm_numa_node numa_nodes[MAX_NUMA_NODE];
+};
+
+struct kvm_user_data {
+ __u64 loader_start;
+ __u64 initrd_start;
+ __u64 initrd_size;
+ __u64 ram_size;
+ struct kvm_numa_info numa_info;
+};
+#endif
+
/* enable ucontrol for s390 */
struct kvm_s390_ucas_mapping {
__u64 user_addr;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9166ef044..ef9f6d9df 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1089,7 +1089,12 @@ static struct kvm *kvm_create_vm(unsigned long type)
goto out_err_no_arch_destroy_vm;
}
- kvm->max_halt_poll_ns = halt_poll_ns;
+#ifdef CONFIG_CVM_HOST
+ if (kvm_arm_cvm_type(type))
+ kvm->max_halt_poll_ns = CVM_MAX_HALT_POLL_NS;
+ else
+#endif
+ kvm->max_halt_poll_ns = halt_poll_ns;
r = kvm_arch_init_vm(kvm, type);
if (r)
--
2.33.0
2
1
From: Jingxian He <hejingxian(a)huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CC0X
CVE: N/A
------------
Add LPI support for the cVM guest OS:
The memory mapped by the gic-its driver must be shared with qemu/kvm,
so the cVM guest gic-its driver allocates that memory from the bounce
buffer.
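For illustration, this is the allocation pattern applied at each ITS
table/ITT allocation site (a minimal sketch; is_cvm_world() and the
its_alloc_shared_pages* helpers are introduced by this patch, while the
wrapper name its_table_alloc is hypothetical):

static struct page *its_table_alloc(int node, gfp_t gfp, unsigned int order)
{
	/* In a cVM guest, ITS tables must live in memory shared with qemu/kvm */
	if (is_cvm_world())
		return its_alloc_shared_pages_node(node, gfp, order);
	return alloc_pages_node(node, gfp, order);
}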
Signed-off-by: Jingxian He <hejingxian(a)huawei.com>
Signed-off-by: wuweinan <wuweinan(a)huawei.com>
---
drivers/irqchip/irq-gic-v3-its.c | 228 ++++++++++++++++++++++++++++---
1 file changed, 207 insertions(+), 21 deletions(-)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 632444f86..a09cad8a0 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -29,6 +29,10 @@
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
+#ifdef CONFIG_CVM_GUEST
+#include <linux/swiotlb.h>
+#include <asm/cvm_guest.h>
+#endif
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
@@ -182,6 +186,90 @@ struct its_baser {
struct its_device;
+#ifdef CONFIG_CVM_GUEST
+static struct device cvm_alloc_device;
+static LIST_HEAD(cvm_its_nodes);
+static raw_spinlock_t cvm_its_lock;
+
+struct its_device_order {
+ struct its_device *dev;
+ struct list_head entry;
+ int itt_order;
+};
+
+static inline struct page *its_alloc_shared_pages_node(int node, gfp_t gfp,
+ unsigned int order)
+{
+ return swiotlb_alloc(&cvm_alloc_device, (1 << order) * PAGE_SIZE);
+}
+
+static inline struct page *its_alloc_shared_pages(gfp_t gfp, unsigned int order)
+{
+ return its_alloc_shared_pages_node(NUMA_NO_NODE, gfp, order);
+}
+
+static void its_free_shared_pages(void *addr, int order)
+{
+ if (order < 0)
+ return;
+
+	swiotlb_free(&cvm_alloc_device, virt_to_page(addr), (1 << order) * PAGE_SIZE);
+}
+
+static int add_its_device_order(struct its_device *dev, int itt_order)
+{
+ struct its_device_order *new;
+ unsigned long flags;
+
+ new = kmalloc(sizeof(struct its_device_order), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+ new->dev = dev;
+ new->itt_order = itt_order;
+ raw_spin_lock_irqsave(&cvm_its_lock, flags);
+ list_add_tail(&new->entry, &cvm_its_nodes);
+ raw_spin_unlock_irqrestore(&cvm_its_lock, flags);
+ return 0;
+}
+
+/* get its device order and then free its device order */
+static int get_its_device_order(struct its_device *dev)
+{
+ struct its_device_order *pos, *tmp;
+ unsigned long flags;
+ int itt_order = -1;
+
+ raw_spin_lock_irqsave(&cvm_its_lock, flags);
+ list_for_each_entry_safe(pos, tmp, &cvm_its_nodes, entry) {
+ if (pos->dev == dev) {
+ itt_order = pos->itt_order;
+ list_del(&pos->entry);
+ kfree(pos);
+ goto found;
+ }
+ }
+found:
+ raw_spin_unlock_irqrestore(&cvm_its_lock, flags);
+ return itt_order;
+}
+
+static void *its_alloc_shared_page_address(struct its_device *dev, struct its_node *its, int sz)
+{
+ struct page *page;
+ int itt_order;
+
+ itt_order = get_order(sz);
+ if (add_its_device_order(dev, itt_order))
+ return NULL;
+
+ page = its_alloc_shared_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
+ itt_order);
+ if (!page)
+ return NULL;
+ return (void *)page_address(page);
+}
+#endif
+
/*
* The ITS structure - contains most of the infrastructure, with the
* top-level MSI domain, the command queue, the collections, and the
@@ -2447,7 +2535,13 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags)
{
struct page *prop_page;
- prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ prop_page = its_alloc_shared_pages(gfp_flags,
+ get_order(LPI_PROPBASE_SZ));
+ else
+#endif
+ prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
if (!prop_page)
return NULL;
@@ -2458,8 +2552,14 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags)
static void its_free_prop_table(struct page *prop_page)
{
- free_pages((unsigned long)page_address(prop_page),
- get_order(LPI_PROPBASE_SZ));
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ its_free_shared_pages(page_address(prop_page),
+ get_order(LPI_PROPBASE_SZ));
+ else
+#endif
+ free_pages((unsigned long)page_address(prop_page),
+ get_order(LPI_PROPBASE_SZ));
}
static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
@@ -2581,7 +2681,13 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
order = get_order(GITS_BASER_PAGES_MAX * psz);
}
- page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ page = its_alloc_shared_pages_node(its->numa_node,
+ GFP_KERNEL | __GFP_ZERO, order);
+ else
+#endif
+ page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
if (!page)
return -ENOMEM;
@@ -2594,7 +2700,12 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
/* 52bit PA is supported only when PageSize=64K */
if (psz != SZ_64K) {
pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
- free_pages((unsigned long)base, order);
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ its_free_shared_pages(base, order);
+ else
+#endif
+ free_pages((unsigned long)base, order);
return -ENXIO;
}
@@ -2648,7 +2759,12 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
&its->phys_base, its_base_type_string[type],
val, tmp);
- free_pages((unsigned long)base, order);
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ its_free_shared_pages(base, order);
+ else
+#endif
+ free_pages((unsigned long)base, order);
return -ENXIO;
}
@@ -2787,8 +2903,14 @@ static void its_free_tables(struct its_node *its)
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
if (its->tables[i].base) {
- free_pages((unsigned long)its->tables[i].base,
- its->tables[i].order);
+#ifdef CONFIG_CVM_GUEST
+			if (is_cvm_world())
+ its_free_shared_pages(its->tables[i].base,
+ its->tables[i].order);
+ else
+#endif
+ free_pages((unsigned long)its->tables[i].base,
+ its->tables[i].order);
its->tables[i].base = NULL;
}
}
@@ -3051,7 +3173,13 @@ static bool allocate_vpe_l2_table(int cpu, u32 id)
/* Allocate memory for 2nd level table */
if (!table[idx]) {
- page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ page = its_alloc_shared_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(psz));
+ else
+#endif
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
if (!page)
return false;
@@ -3170,7 +3298,13 @@ static int allocate_vpe_l1_table(void)
pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
np, npg, psz, epp, esz);
- page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ page = its_alloc_shared_pages(GFP_ATOMIC | __GFP_ZERO,
+ get_order(np * PAGE_SIZE));
+ else
+#endif
+ page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
if (!page)
return -ENOMEM;
@@ -3218,8 +3352,14 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags)
{
struct page *pend_page;
- pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
- get_order(LPI_PENDBASE_SZ));
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ pend_page = its_alloc_shared_pages(gfp_flags | __GFP_ZERO,
+ get_order(LPI_PENDBASE_SZ));
+ else
+#endif
+ pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
+ get_order(LPI_PENDBASE_SZ));
if (!pend_page)
return NULL;
@@ -3231,7 +3371,13 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags)
static void its_free_pending_table(struct page *pt)
{
- free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ its_free_shared_pages(page_address(pt),
+ get_order(LPI_PENDBASE_SZ));
+ else
+#endif
+ free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
}
/*
@@ -3768,8 +3914,15 @@ static bool its_alloc_table_entry(struct its_node *its,
/* Allocate memory for 2nd level table */
if (!table[idx]) {
- page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
- get_order(baser->psz));
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ page = its_alloc_shared_pages_node(its->numa_node,
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(baser->psz));
+ else
+#endif
+ page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
+ get_order(baser->psz));
if (!page)
return false;
@@ -3872,7 +4025,12 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
nr_ites = max(2, nvecs);
sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
- itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ itt = its_alloc_shared_page_address(dev, its, sz);
+ else
+#endif
+ itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
if (alloc_lpis) {
lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
if (lpi_map)
@@ -3886,7 +4044,12 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
kfree(dev);
- kfree(itt);
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ its_free_shared_pages(itt, get_order(sz));
+ else
+#endif
+ kfree(itt);
kfree(lpi_map);
kfree(col_map);
return NULL;
@@ -3923,7 +4086,12 @@ static void its_free_device(struct its_device *its_dev)
list_del(&its_dev->entry);
raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
kfree(its_dev->event_map.col_map);
- kfree(its_dev->itt);
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ its_free_shared_pages(its_dev->itt, get_its_device_order(its_dev));
+ else
+#endif
+ kfree(its_dev->itt);
if (its_dev->is_vdev) {
WARN_ON(!rsv_devid_pool_cap);
@@ -5594,8 +5762,15 @@ static int __init its_probe_one(struct resource *res,
its->numa_node = numa_node;
- page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
- get_order(ITS_CMD_QUEUE_SZ));
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ page = its_alloc_shared_pages_node(its->numa_node,
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(ITS_CMD_QUEUE_SZ));
+ else
+#endif
+ page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
+ get_order(ITS_CMD_QUEUE_SZ));
if (!page) {
err = -ENOMEM;
goto out_unmap_sgir;
@@ -5661,7 +5836,12 @@ static int __init its_probe_one(struct resource *res,
out_free_tables:
its_free_tables(its);
out_free_cmd:
- free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ its_free_shared_pages(its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
+ else
+#endif
+ free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_unmap_sgir:
if (its->sgir_base)
iounmap(its->sgir_base);
@@ -5957,6 +6137,12 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
bool has_vtimer_irqbypass = false;
int err;
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world()) {
+ device_initialize(&cvm_alloc_device);
+ raw_spin_lock_init(&cvm_its_lock);
+ }
+#endif
gic_rdists = rdists;
its_parent = parent_domain;
--
2.33.0
1
0
08 Apr '24
From: Jingxian He <hejingxian(a)huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CC0X
CVE: N/A
------------
Add the bounce buffer feature for the cVM guest OS:
1) Memory mapped by the cVM guest is secure memory.
2) Qemu/kvm cannot access that secure memory.
3) Use the bounce buffer as memory shared between the cVM guest and qemu/kvm.
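As a rough sketch of how the pool becomes shared (mirroring the
swiotlb_update_mem_attributes() hunk below; is_cvm_world() and
set_cvm_memory_decrypted() are the helpers added by this patch):

	void *vaddr = phys_to_virt(io_tlb_start);
	unsigned long bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);

	/* Set the NS bit on the pool's PTEs so qemu/kvm can access it */
	if (is_cvm_world())
		set_cvm_memory_decrypted((unsigned long)vaddr,
					 bytes >> PAGE_SHIFT);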
Signed-off-by: Jingxian He <hejingxian(a)huawei.com>
---
arch/arm64/configs/defconfig | 1 +
arch/arm64/configs/openeuler_defconfig | 1 +
arch/arm64/include/asm/cvm_guest.h | 21 ++++++
arch/arm64/kvm/Kconfig | 8 +++
arch/arm64/kvm/Makefile | 1 +
arch/arm64/kvm/cvm_guest.c | 90 ++++++++++++++++++++++++++
arch/arm64/mm/mmu.c | 11 ++++
arch/arm64/mm/pageattr.c | 9 ++-
include/linux/swiotlb.h | 13 ++++
kernel/dma/direct.c | 39 +++++++++++
kernel/dma/swiotlb.c | 86 +++++++++++++++++++++++-
11 files changed, 278 insertions(+), 2 deletions(-)
create mode 100644 arch/arm64/include/asm/cvm_guest.h
create mode 100644 arch/arm64/kvm/cvm_guest.c
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index ace2bf4ad..0ba4538d9 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -111,6 +111,7 @@ CONFIG_ACPI_APEI_EINJ=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=y
CONFIG_CVM_HOST=y
+CONFIG_CVM_GUEST=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index e298ca7e5..25a5fa5c7 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -763,6 +763,7 @@ CONFIG_IRQ_BYPASS_MANAGER=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=y
CONFIG_CVM_HOST=y
+CONFIG_CVM_GUEST=y
CONFIG_HAVE_KVM_IRQCHIP=y
CONFIG_HAVE_KVM_IRQFD=y
CONFIG_HAVE_KVM_IRQ_ROUTING=y
diff --git a/arch/arm64/include/asm/cvm_guest.h b/arch/arm64/include/asm/cvm_guest.h
new file mode 100644
index 000000000..3c5bda7ca
--- /dev/null
+++ b/arch/arm64/include/asm/cvm_guest.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+#ifndef __ASM_CVM_GUEST_H
+#define __ASM_CVM_GUEST_H
+
+#ifdef CONFIG_CVM_GUEST
+static inline bool cvm_mem_encrypt_active(void)
+{
+ return false;
+}
+
+int set_cvm_memory_encrypted(unsigned long addr, int numpages);
+
+int set_cvm_memory_decrypted(unsigned long addr, int numpages);
+
+bool is_cvm_world(void);
+
+#endif
+#endif
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 7c24a4d33..d21e27f74 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -54,6 +54,14 @@ config CVM_HOST
If unsure, say N.
+config CVM_GUEST
+ bool "CVM guest enable"
+ depends on KVM && SWIOTLB && ARM64
+ help
+	  Support running the kernel as a confidential VM (CVM) guest based on S-EL2.
+
+ If unsure, say N.
+
if KVM
source "virt/kvm/Kconfig"
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 3b92eaa4f..61dce3ab4 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -30,3 +30,4 @@ kvm-$(CONFIG_CVM_HOST) += cvm_exit.o
kvm-$(CONFIG_KVM_ARM_PMU) += pmu-emul.o
obj-$(CONFIG_KVM_HISI_VIRT) += hisilicon/
+obj-$(CONFIG_CVM_GUEST) += cvm_guest.o
diff --git a/arch/arm64/kvm/cvm_guest.c b/arch/arm64/kvm/cvm_guest.c
new file mode 100644
index 000000000..9df24af46
--- /dev/null
+++ b/arch/arm64/kvm/cvm_guest.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+
+#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
+#include <asm/tlbflush.h>
+
+static bool cvm_guest_enable __read_mostly;
+static int __init setup_cvm_guest(char *str)
+{
+ int ret;
+ unsigned int val;
+
+ if (!str)
+ return 0;
+
+ cvm_guest_enable = false;
+ ret = kstrtouint(str, 10, &val);
+ if (ret) {
+ pr_warn("Unable to parse cvm_guest.\n");
+ } else {
+ if (val)
+ cvm_guest_enable = true;
+ }
+ return ret;
+}
+early_param("cvm_guest", setup_cvm_guest);
+
+bool is_cvm_world(void)
+{
+ return cvm_guest_enable;
+}
+
+static int change_page_range_cvm(pte_t *ptep, unsigned long addr, void *data)
+{
+	bool encrypt = (bool)data;
+	pte_t pte = READ_ONCE(*ptep);
+
+	if (encrypt) {
+		/* Nothing to do if the NS bit is already clear */
+		if (!(pte.pte & 0x20))
+			return 0;
+		pte.pte &= ~0x20;
+	} else {
+		/* Nothing to do if the NS bit is already set */
+		if (pte.pte & 0x20)
+			return 0;
+		/* Set the NS bit so the host can access the page */
+		pte.pte |= 0x20;
+	}
+ set_pte(ptep, pte);
+
+ return 0;
+}
+
+static int __change_memory_common_cvm(unsigned long start, unsigned long size, bool encrypt)
+{
+	int ret;
+
+	ret = apply_to_page_range(&init_mm, start, size, change_page_range_cvm, (void *)encrypt);
+ flush_tlb_kernel_range(start, start + size);
+ return ret;
+}
+
+static int __set_memory_encrypted(unsigned long addr,
+ int numpages,
+ bool encrypt)
+{
+ if (!is_cvm_world()) {
+ return 0;
+ }
+ WARN_ON(!__is_lm_address(addr));
+ return __change_memory_common_cvm(addr, PAGE_SIZE * numpages, encrypt);
+}
+
+int set_cvm_memory_encrypted(unsigned long addr, int numpages)
+{
+ return __set_memory_encrypted(addr, numpages, true);
+}
+
+int set_cvm_memory_decrypted(unsigned long addr, int numpages)
+{
+ return __set_memory_encrypted(addr, numpages, false);
+}
+
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 804d5197c..b6eb82f6d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -38,6 +38,9 @@
#include <asm/ptdump.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
+#ifdef CONFIG_CVM_GUEST
+#include <asm/cvm_guest.h>
+#endif
#define NO_BLOCK_MAPPINGS BIT(0)
#define NO_CONT_MAPPINGS BIT(1)
@@ -494,7 +497,11 @@ static void __init map_mem(pgd_t *pgdp)
int flags = 0, eflags = 0;
u64 i;
+#ifdef CONFIG_CVM_GUEST
+ if (rodata_full || debug_pagealloc_enabled() || is_cvm_world())
+#else
if (rodata_full || debug_pagealloc_enabled())
+#endif
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
#ifdef CONFIG_KFENCE
@@ -1514,7 +1521,11 @@ int arch_add_memory(int nid, u64 start, u64 size,
}
+#ifdef CONFIG_CVM_GUEST
+ if (rodata_full || debug_pagealloc_enabled() || is_cvm_world())
+#else
if (rodata_full || debug_pagealloc_enabled())
+#endif
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 0bc12dbf2..fe0650386 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -11,6 +11,9 @@
#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
+#ifdef CONFIG_CVM_GUEST
+#include <asm/cvm_guest.h>
+#endif
struct page_change_data {
pgprot_t set_mask;
@@ -188,7 +191,11 @@ int set_direct_map_default_noflush(struct page *page)
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
- if (!debug_pagealloc_enabled() && !rodata_full)
+#ifdef CONFIG_CVM_GUEST
+ if ((!debug_pagealloc_enabled() && !rodata_full) || is_cvm_world())
+#else
+	if (!debug_pagealloc_enabled() && !rodata_full)
+#endif
return;
set_memory_valid((unsigned long)page_address(page), numpages, enable);
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 5d2dbe7e0..b4ca6622a 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -6,6 +6,9 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/limits.h>
+#ifdef CONFIG_CVM_GUEST
+#include <asm/cvm_guest.h>
+#endif
struct device;
struct page;
@@ -75,6 +78,16 @@ static inline bool is_swiotlb_buffer(phys_addr_t paddr)
return paddr >= io_tlb_start && paddr < io_tlb_end;
}
+#ifdef CONFIG_CVM_GUEST
+static inline bool is_swiotlb_for_alloc(struct device *dev)
+{
+ return is_cvm_world();
+}
+
+struct page *swiotlb_alloc(struct device *dev, size_t size);
+bool swiotlb_free(struct device *dev, struct page *page, size_t size);
+#endif
+
void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
size_t swiotlb_max_mapping_size(struct device *dev);
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 2922250f9..075e85cfb 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -75,6 +75,20 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}
+#ifdef CONFIG_CVM_GUEST
+static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
+{
+ struct page *page = swiotlb_alloc(dev, size);
+
+ if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+ swiotlb_free(dev, page, size);
+ return NULL;
+ }
+
+ return page;
+}
+#endif
+
static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
gfp_t gfp)
{
@@ -84,6 +98,11 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
WARN_ON_ONCE(!PAGE_ALIGNED(size));
+#ifdef CONFIG_CVM_GUEST
+ if (is_swiotlb_for_alloc(dev))
+ return dma_direct_alloc_swiotlb(dev, size);
+#endif
+
gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
&phys_limit);
page = dma_alloc_contiguous(dev, size, gfp);
@@ -237,6 +256,11 @@ void *dma_direct_alloc(struct device *dev, size_t size,
return NULL;
}
out_free_pages:
+#ifdef CONFIG_CVM_GUEST
+ if (is_swiotlb_for_alloc(dev) &&
+ swiotlb_free(dev, page, size))
+ return NULL;
+#endif
dma_free_contiguous(dev, page, size);
return NULL;
}
@@ -271,6 +295,11 @@ void dma_direct_free(struct device *dev, size_t size,
else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
arch_dma_clear_uncached(cpu_addr, size);
+#ifdef CONFIG_CVM_GUEST
+ if (is_swiotlb_for_alloc(dev) &&
+ swiotlb_free(dev, dma_direct_to_page(dev, dma_addr), size))
+ return;
+#endif
dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
}
@@ -307,6 +336,11 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
return page;
out_free_pages:
+#ifdef CONFIG_CVM_GUEST
+ if (is_swiotlb_for_alloc(dev) &&
+ swiotlb_free(dev, page, size))
+ return NULL;
+#endif
dma_free_contiguous(dev, page, size);
return NULL;
}
@@ -325,6 +359,11 @@ void dma_direct_free_pages(struct device *dev, size_t size,
if (force_dma_unencrypted(dev))
set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
+#ifdef CONFIG_CVM_GUEST
+ if (is_swiotlb_for_alloc(dev) &&
+ swiotlb_free(dev, page, size))
+ return;
+#endif
dma_free_contiguous(dev, page, size);
}
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index d897d1613..579d3cb50 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -47,6 +47,10 @@
#include <linux/memblock.h>
#include <linux/iommu-helper.h>
+#ifdef CONFIG_CVM_GUEST
+#include <asm/cvm_guest.h>
+#endif
+
#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>
@@ -194,12 +198,20 @@ void __init swiotlb_update_mem_attributes(void)
void *vaddr;
unsigned long bytes;
+#ifdef CONFIG_CVM_GUEST
+ if (!is_cvm_world() && (no_iotlb_memory || late_alloc))
+#else
if (no_iotlb_memory || late_alloc)
+#endif
return;
vaddr = phys_to_virt(io_tlb_start);
bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ set_cvm_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
+#endif
memset(vaddr, 0, bytes);
}
@@ -265,8 +277,13 @@ swiotlb_init(int verbose)
/* Get IO TLB memory from the low pages */
vstart = memblock_alloc_low(PAGE_ALIGN(bytes), PAGE_SIZE);
- if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
+ if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose)) {
+#ifdef CONFIG_CVM_GUEST
+ if (is_cvm_world())
+ swiotlb_update_mem_attributes();
+#endif
return;
+ }
if (io_tlb_start) {
memblock_free_early(io_tlb_start,
@@ -772,3 +789,70 @@ static int __init swiotlb_create_debugfs(void)
late_initcall(swiotlb_create_debugfs);
#endif
+
+#ifdef CONFIG_CVM_GUEST
+struct page *swiotlb_alloc(struct device *dev, size_t size)
+{
+ phys_addr_t tlb_addr;
+ int index;
+
+ index = find_slots(dev, 0, size);
+ if (index == -1)
+ return NULL;
+
+ tlb_addr = slot_addr(io_tlb_start, index);
+ return pfn_to_page(PFN_DOWN(tlb_addr));
+}
+
+static void swiotlb_release_slots(struct device *hwdev, phys_addr_t tlb_addr,
+ size_t alloc_size)
+{
+ unsigned long flags;
+ unsigned int offset = swiotlb_align_offset(hwdev, tlb_addr);
+ int i, count, nslots = nr_slots(alloc_size + offset);
+ int index = (tlb_addr - offset - io_tlb_start) >> IO_TLB_SHIFT;
+
+ /*
+ * Return the buffer to the free list by setting the corresponding
+ * entries to indicate the number of contiguous entries available.
+ * While returning the entries to the free list, we merge the entries
+ * with slots below and above the pool being returned.
+ */
+ spin_lock_irqsave(&io_tlb_lock, flags);
+ if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
+ count = io_tlb_list[index + nslots];
+ else
+ count = 0;
+
+ /*
+ * Step 1: return the slots to the free list, merging the slots with
+	 * succeeding slots
+ */
+ for (i = index + nslots - 1; i >= index; i--) {
+ io_tlb_list[i] = ++count;
+ io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+ }
+
+ /*
+ * Step 2: merge the returned slots with the preceding slots, if
+ * available (non zero)
+ */
+ for (i = index - 1;
+ io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && io_tlb_list[i];
+ i--)
+ io_tlb_list[i] = ++count;
+ io_tlb_used -= nslots;
+ spin_unlock_irqrestore(&io_tlb_lock, flags);
+}
+
+bool swiotlb_free(struct device *dev, struct page *page, size_t size)
+{
+ phys_addr_t tlb_addr = page_to_phys(page);
+
+ if (!is_swiotlb_buffer(tlb_addr))
+ return false;
+
+ swiotlb_release_slots(dev, tlb_addr, size);
+ return true;
+}
+#endif
--
2.33.0
1
0
From: Jingxian He <hejingxian(a)huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CC0X
CVE: N/A
------------
Enable PMU physical interrupt injection for confidential VMs.
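In outline, the host masks its own PMU interrupt while a TEC vcpu with
a pending virtual PMU IRQ runs, and unmasks it after exit (a sketch of
the kvm_arch_vcpu_ioctl_run() change below; arm_pmu_set_phys_irq() is
the helper added by this patch):

	bool pmu_stopped = false;

	if (vcpu_is_tec(vcpu) && vcpu->arch.pmu.irq_level) {
		pmu_stopped = true;
		arm_pmu_set_phys_irq(false);	/* mask this CPU's PMU IRQ */
	}
	/* ... enter the guest via tmi_tec_enter() ... */
	if (pmu_stopped)
		arm_pmu_set_phys_irq(true);	/* unmask after exit */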
Signed-off-by: Jingxian He <hejingxian(a)huawei.com>
---
arch/arm64/include/asm/kvm_tmi.h | 5 ++++-
arch/arm64/kvm/arm.c | 24 ++++++++++++++++++++++++
arch/arm64/kvm/pmu-emul.c | 9 +++++++++
drivers/perf/arm_pmu.c | 17 +++++++++++++++++
include/linux/perf/arm_pmu.h | 3 +++
5 files changed, 57 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/kvm_tmi.h b/arch/arm64/include/asm/kvm_tmi.h
index 554b3e439..49ae4f77c 100644
--- a/arch/arm64/include/asm/kvm_tmi.h
+++ b/arch/arm64/include/asm/kvm_tmi.h
@@ -4,6 +4,7 @@
*/
#ifndef __TMM_TMI_H
#define __TMM_TMI_H
+#ifdef CONFIG_CVM_HOST
#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_pgtable.h>
@@ -144,6 +145,7 @@ struct tmi_tec_exit {
uint64_t cntp_ctl;
uint64_t cntp_cval;
uint64_t imm;
+ uint64_t pmu_ovf_status;
};
struct tmi_tec_run {
@@ -370,4 +372,5 @@ unsigned long cvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu,
int kvm_cvm_vcpu_set_events(struct kvm_vcpu *vcpu,
bool serror_pending, bool ext_dabt_pending);
-#endif
+#endif
+#endif
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 32974a10e..6790b06f9 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -40,6 +40,7 @@
#include <asm/sections.h>
#ifdef CONFIG_CVM_HOST
#include <asm/kvm_tmi.h>
+#include <linux/perf/arm_pmu.h>
#endif
#include <kvm/arm_hypercalls.h>
@@ -890,6 +891,18 @@ static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
xfer_to_guest_mode_work_pending();
}
+#ifdef CONFIG_CVM_HOST
+static inline void update_pmu_phys_irq(struct kvm_vcpu *vcpu, bool *pmu_stopped)
+{
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+
+ if (pmu->irq_level) {
+ *pmu_stopped = true;
+ arm_pmu_set_phys_irq(false);
+ }
+}
+#endif
+
/**
* kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
* @vcpu: The VCPU pointer
@@ -934,6 +947,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
ret = 1;
run->exit_reason = KVM_EXIT_UNKNOWN;
while (ret > 0) {
+#ifdef CONFIG_CVM_HOST
+ bool pmu_stopped = false;
+#endif
/*
* Check conditions before entering the guest
*/
@@ -953,6 +969,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
preempt_disable();
kvm_pmu_flush_hwstate(vcpu);
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu))
+ update_pmu_phys_irq(vcpu, &pmu_stopped);
+#endif
local_irq_disable();
@@ -1063,6 +1083,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
}
#endif
preempt_enable();
+#ifdef CONFIG_CVM_HOST
+ if (pmu_stopped)
+ arm_pmu_set_phys_irq(true);
+#endif
/*
* The ARMv8 architecture doesn't give the hypervisor
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 9fdc76c6d..00aa9ebe6 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -13,6 +13,7 @@
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>
+#include <asm/kvm_tmi.h>
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
@@ -370,6 +371,14 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
u64 reg = 0;
+#ifdef CONFIG_CVM_HOST
+ if (vcpu_is_tec(vcpu)) {
+		struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
+
+		return run->tec_exit.pmu_ovf_status;
+ }
+#endif
+
if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 4ef8aee84..743f52d94 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -797,6 +797,23 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
}
+#ifdef CONFIG_CVM_HOST
+void arm_pmu_set_phys_irq(bool enable)
+{
+ int cpu = get_cpu();
+ struct arm_pmu *pmu = per_cpu(cpu_armpmu, cpu);
+ int irq;
+
+ irq = armpmu_get_cpu_irq(pmu, cpu);
+ if (irq && !enable)
+ per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);
+ else if (irq && enable)
+ per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);
+
+ put_cpu();
+}
+#endif
+
#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 6fd58c8f9..c7a35d321 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -189,6 +189,9 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
#endif
+#ifdef CONFIG_CVM_HOST
+void arm_pmu_set_phys_irq(bool enable);
+#endif
/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
struct arm_pmu *armpmu_alloc_atomic(void);
--
2.33.0
1
0