From: eillon <yezhenyu2(a)huawei.com>
Support using the HDBSS feature during live migration.
Hardware Dirty state tracking Structure (HDBSS) enhances the tracking
of translation table descriptors' dirty state to reduce the cost of
surveying for dirtied granules.
The HDBSS enabling time and HDBSS buffer size are controlled by
QEMU.
Signed-off-by: eillon <yezhenyu2(a)huawei.com>
Signed-off-by: Xiang Chen <chenxiang66(a)hisilicon.com>
---
arch/arm64/Kconfig | 19 +++++
arch/arm64/Makefile | 4 +-
arch/arm64/include/asm/cpufeature.h | 15 ++++
arch/arm64/include/asm/esr.h | 2 +
arch/arm64/include/asm/kvm_arm.h | 1 +
arch/arm64/include/asm/kvm_asm.h | 2 +
arch/arm64/include/asm/kvm_host.h | 6 ++
arch/arm64/include/asm/kvm_pgtable.h | 1 +
arch/arm64/include/asm/sysreg.h | 11 +++
arch/arm64/kvm/arm.c | 107 +++++++++++++++++++++++++++
arch/arm64/kvm/handle_exit.c | 47 ++++++++++++
arch/arm64/kvm/hyp/pgtable.c | 8 ++
arch/arm64/kvm/mmu.c | 13 +++-
arch/arm64/tools/cpucaps | 1 +
arch/arm64/tools/sysreg | 29 ++++++++
include/uapi/linux/kvm.h | 1 +
tools/include/uapi/linux/kvm.h | 1 +
17 files changed, 266 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index fc56e4e30e29..dc5cc18e4ced 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2302,6 +2302,25 @@ config ARM64_NMI
endmenu # "ARMv8.8 architectural features"
+menu "ARMv9.5 architectural features"
+
+config ARM64_HDBSS
+ bool "Enable support for Hardware Dirty state tracking Structure (HDBSS)"
+ default y
+ depends on AS_HAS_ARMV9_5
+ help
+	  Hardware Dirty state tracking Structure (HDBSS) enhances the
+	  tracking of translation table descriptors' dirty state to
+	  reduce the cost of surveying for dirtied granules.
+
+	  The feature introduces new system registers (HDBSSBR_EL2 and
+	  HDBSSPROD_EL2), assembler support for which depends on AS_HAS_ARMV9_5.
+
+config AS_HAS_ARMV9_5
+ def_bool $(cc-option,-Wa$(comma)-march=armv9.5-a)
+
+endmenu # "ARMv9.5 architectural features"
+
config ARM64_SVE
bool "ARM Scalable Vector Extension support"
default y
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 94be26b6c147..37d70488ed22 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -86,7 +86,9 @@ endif
# freely generate instructions which are not supported by earlier architecture
# versions, which would prevent a single kernel image from working on earlier
# hardware.
-ifeq ($(CONFIG_AS_HAS_ARMV8_5), y)
+ifeq ($(CONFIG_AS_HAS_ARMV9_5), y)
+ asm-arch := armv9.5-a
+else ifeq ($(CONFIG_AS_HAS_ARMV8_5), y)
asm-arch := armv8.5-a
else ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
asm-arch := armv8.4-a
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 450124238563..7724568d8082 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -740,6 +740,21 @@ static inline bool system_supports_mixed_endian(void)
return val == 0x1;
}
+static inline bool system_supports_hdbss(void)
+{
+ u64 mmfr1;
+ u32 val;
+
+ if (!IS_ENABLED(CONFIG_ARM64_HDBSS))
+ return false;
+
+ mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+ val = cpuid_feature_extract_unsigned_field(mmfr1,
+ ID_AA64MMFR1_EL1_HAFDBS_SHIFT);
+
+ return val == ID_AA64MMFR1_EL1_HAFDBS_HDBSS;
+}
+
static __always_inline bool system_supports_fpsimd(void)
{
return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 1c63256efd25..78dc4bb36491 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -148,6 +148,8 @@
#define ESR_ELx_CM (UL(1) << ESR_ELx_CM_SHIFT)
/* ISS2 field definitions for Data Aborts */
+#define ESR_ELx_HDBSSF_SHIFT (11)
+#define ESR_ELx_HDBSSF (UL(1) << ESR_ELx_HDBSSF_SHIFT)
#define ESR_ELx_TnD_SHIFT (10)
#define ESR_ELx_TnD (UL(1) << ESR_ELx_TnD_SHIFT)
#define ESR_ELx_TagAccess_SHIFT (9)
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 9385293e6d45..1f38f8b6cc2d 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -128,6 +128,7 @@
TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
/* VTCR_EL2 Registers bits */
+#define VTCR_EL2_HDBSS (1UL << 45)
#define VTCR_EL2_RES1 (1U << 31)
#define VTCR_EL2_HD (1 << 22)
#define VTCR_EL2_HA (1 << 21)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 24b5e6b23417..c177c15b2f80 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -211,6 +211,8 @@ struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;
+extern bool __read_mostly enable_hdbss;
+
DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init CHOOSE_NVHE_SYM(__kvm_hyp_init)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 66c0bb96f007..a3addc91af16 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -625,6 +625,12 @@ struct kvm_vcpu_arch {
#ifdef CONFIG_CVM_HOST
struct cvm_tec tec;
#endif
+
+ /* HDBSS registers info */
+ struct {
+ u64 br_el2;
+ u64 prod_el2;
+ } hdbss;
};
/*
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index d3e354bb8351..4a03e4801127 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -180,6 +180,7 @@ enum kvm_pgtable_prot {
KVM_PGTABLE_PROT_R = BIT(2),
KVM_PGTABLE_PROT_DEVICE = BIT(3),
+ KVM_PGTABLE_PROT_DBM = BIT(4),
KVM_PGTABLE_PROT_SW0 = BIT(55),
KVM_PGTABLE_PROT_SW1 = BIT(56),
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 42358b8d678e..a3790448d839 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -1007,6 +1007,17 @@
#define PIRx_ELx_PERM(idx, perm) ((perm) << ((idx) * 4))
+#define HDBSS_MAX_SIZE HDBSSBR_EL2_SZ_2MB
+
+#define HDBSSBR_EL2(baddr, sz)		(((baddr) & GENMASK(55, 12 + (sz))) | \
+					 ((sz) << HDBSSBR_EL2_SZ_SHIFT))
+#define HDBSSBR_BADDR(br) ((br) & GENMASK(55, (12 + HDBSSBR_SZ(br))))
+#define HDBSSBR_SZ(br) (((br) & HDBSSBR_EL2_SZ_MASK) >> HDBSSBR_EL2_SZ_SHIFT)
+
+#define HDBSSPROD_EL2(fsc, idx) ((fsc) << HDBSSPROD_EL2_FSC_SHIFT | \
+ (idx) << HDBSSPROD_EL2_INDEX_SHIFT)
+#define HDBSSPROD_IDX(prod) (((prod) & HDBSSPROD_EL2_INDEX_MASK) >> HDBSSPROD_EL2_INDEX_SHIFT)
+
#define ARM64_FEATURE_FIELD_BITS 4
/* Defined for compatibility only, do not add new users. */
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 333c65ced849..a469496ea515 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -85,6 +85,72 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}
+bool __read_mostly enable_hdbss = 0;
+
+static int kvm_cap_arm_enable_hdbss(struct kvm *kvm,
+ struct kvm_enable_cap *cap)
+{
+ unsigned long i;
+ struct kvm_vcpu *vcpu;
+ struct page *hdbss_pg;
+ int size = cap->args[0];
+
+ if (!system_supports_hdbss()) {
+ kvm_err("This system does not support HDBSS!\n");
+ return -EINVAL;
+ }
+
+ if (size < 0 || size > HDBSS_MAX_SIZE) {
+		kvm_err("Invalid HDBSS buffer size: %d!\n", size);
+ return -EINVAL;
+ }
+
+ /* Enable the HDBSS feature if size > 0, otherwise disable it. */
+ if (size) {
+ kvm->arch.vtcr |= VTCR_EL2_HD | VTCR_EL2_HDBSS;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ hdbss_pg = alloc_pages(GFP_KERNEL, size);
+ if (!hdbss_pg) {
+ kvm_err("Alloc HDBSS buffer failed!\n");
+ return -EINVAL;
+ }
+
+ vcpu->arch.hdbss.br_el2 = HDBSSBR_EL2(page_to_phys(hdbss_pg), size);
+ vcpu->arch.hdbss.prod_el2 = 0;
+
+ /*
+ * We should kick vcpus out of guest mode here to
+ * load new vtcr value to vtcr_el2 register when
+ * re-enter guest mode.
+ */
+ kvm_vcpu_kick(vcpu);
+ }
+
+ enable_hdbss = true;
+ kvm_info("Enable HDBSS success, HDBSS buffer size: %d\n", size);
+ } else if (enable_hdbss) {
+ kvm->arch.vtcr &= ~(VTCR_EL2_HD | VTCR_EL2_HDBSS);
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ /* Kick vcpus to flush hdbss buffer. */
+ kvm_vcpu_kick(vcpu);
+
+ hdbss_pg = phys_to_page(HDBSSBR_BADDR(vcpu->arch.hdbss.br_el2));
+ if (hdbss_pg)
+ __free_pages(hdbss_pg, HDBSSBR_SZ(vcpu->arch.hdbss.br_el2));
+
+ vcpu->arch.hdbss.br_el2 = 0;
+ vcpu->arch.hdbss.prod_el2 = 0;
+ }
+
+ enable_hdbss = false;
+ kvm_info("Disable HDBSS success\n");
+ }
+
+ return 0;
+}
+
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
struct kvm_enable_cap *cap)
{
@@ -132,6 +198,9 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
}
mutex_unlock(&kvm->slots_lock);
break;
+ case KVM_CAP_ARM_HW_DIRTY_STATE_TRACK:
+ r = kvm_cap_arm_enable_hdbss(kvm, cap);
+ break;
default:
r = -EINVAL;
break;
@@ -347,6 +416,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = sdev_enable;
break;
#endif
+ case KVM_CAP_ARM_HW_DIRTY_STATE_TRACK:
+ r = system_supports_hdbss();
+ break;
default:
r = 0;
}
@@ -475,6 +547,28 @@ static void update_steal_time(struct kvm_vcpu *vcpu)
}
#endif
+static void kvm_arm_load_hdbss(struct kvm_vcpu *vcpu)
+{
+ if (!enable_hdbss)
+ return;
+
+ dsb(sy);
+ isb();
+ write_sysreg(vcpu->arch.hdbss.br_el2, hdbssbr_el2);
+ write_sysreg(vcpu->arch.hdbss.prod_el2, hdbssprod_el2);
+}
+
+static void kvm_arm_save_hdbss(struct kvm_vcpu *vcpu)
+{
+ if (!enable_hdbss)
+ return;
+
+ dsb(sy);
+ isb();
+ vcpu->arch.hdbss.br_el2 = read_sysreg(hdbssbr_el2);
+ vcpu->arch.hdbss.prod_el2 = read_sysreg(hdbssprod_el2);
+}
+
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvm_s2_mmu *mmu;
@@ -520,6 +614,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu_ptrauth_disable(vcpu);
kvm_arch_vcpu_load_debug_state_flags(vcpu);
+ kvm_arm_load_hdbss(vcpu);
+
if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
vcpu_set_on_unsupported_cpu(vcpu);
@@ -531,6 +627,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
+ kvm_arm_save_hdbss(vcpu);
kvm_arch_vcpu_put_debug_state_flags(vcpu);
kvm_arch_vcpu_put_fp(vcpu);
if (has_vhe())
@@ -1626,7 +1723,17 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
+ /*
+ * Flush all CPUs' dirty log buffers to the dirty_bitmap. Called
+ * before reporting dirty_bitmap to userspace. KVM flushes the buffers
+ * on all VM-Exits, thus we only need to kick running vCPUs to force a
+ * VM-Exit.
+ */
+ struct kvm_vcpu *vcpu;
+ unsigned long i;
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_vcpu_kick(vcpu);
}
static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 90959b8b6228..1f3386d948cd 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -292,6 +292,50 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
return arm_exit_handlers[esr_ec];
}
+#define HDBSS_ENTRY_VALID_SHIFT 0
+#define HDBSS_ENTRY_VALID_MASK (1UL << HDBSS_ENTRY_VALID_SHIFT)
+#define HDBSS_ENTRY_IPA_SHIFT 12
+#define HDBSS_ENTRY_IPA_MASK GENMASK_ULL(55, HDBSS_ENTRY_IPA_SHIFT)
+
+static void kvm_flush_hdbss_buffer(struct kvm_vcpu *vcpu)
+{
+ int idx, curr_idx;
+ u64 *hdbss_buf;
+
+ if (!enable_hdbss)
+ return;
+
+ dsb(sy);
+ isb();
+ curr_idx = HDBSSPROD_IDX(read_sysreg(hdbssprod_el2));
+
+ /* Do nothing if HDBSS buffer is empty. */
+ if (curr_idx == 0)
+ return;
+
+ hdbss_buf = page_address(phys_to_page(HDBSSBR_BADDR(vcpu->arch.hdbss.br_el2)));
+ if (!hdbss_buf) {
+		kvm_err("Enter flush hdbss buffer with buffer == NULL!\n");
+ return;
+ }
+
+ for (idx = 0; idx < curr_idx; idx++) {
+ u64 gpa;
+
+ gpa = hdbss_buf[idx];
+ if (!(gpa & HDBSS_ENTRY_VALID_MASK))
+ continue;
+
+ gpa = gpa & HDBSS_ENTRY_IPA_MASK;
+ kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
+ }
+
+ /* reset HDBSS index */
+ dsb(sy);
+ isb();
+ write_sysreg(0, hdbssprod_el2);
+}
+
/*
* We may be single-stepping an emulated instruction. If the emulation
* has been completed in the kernel, we can return to userspace with a
@@ -302,6 +346,9 @@ static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
{
int handled;
+ if (enable_hdbss)
+ kvm_flush_hdbss_buffer(vcpu);
+
/*
* See ARM ARM B1.14.1: "Hyp traps on instructions
* that fail their condition code check"
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index a09a180b63c8..0430d136ef02 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -44,6 +44,8 @@
#define KVM_PTE_LEAF_ATTR_HI_S2_XN BIT(54)
+#define KVM_PTE_LEAF_ATTR_HI_S2_DBM BIT(51)
+
#define KVM_PTE_LEAF_ATTR_HI_S1_GP BIT(50)
#define KVM_PTE_LEAF_ATTR_S2_PERMS (KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
@@ -711,6 +713,9 @@ static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot p
if (prot & KVM_PGTABLE_PROT_W)
attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;
+ if (prot & KVM_PGTABLE_PROT_DBM)
+ attr |= KVM_PTE_LEAF_ATTR_HI_S2_DBM;
+
attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);
attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
@@ -1311,6 +1316,9 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
if (prot & KVM_PGTABLE_PROT_W)
set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;
+ if (prot & KVM_PGTABLE_PROT_DBM)
+ set |= KVM_PTE_LEAF_ATTR_HI_S2_DBM;
+
if (prot & KVM_PGTABLE_PROT_X)
clr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 121a3d90240d..cd3986d9523b 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1574,6 +1574,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (writable)
prot |= KVM_PGTABLE_PROT_W;
+ if (enable_hdbss)
+ prot |= KVM_PGTABLE_PROT_DBM;
+
if (exec_fault)
prot |= KVM_PGTABLE_PROT_X;
@@ -1641,7 +1644,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
unsigned long fault_status;
phys_addr_t fault_ipa;
struct kvm_memory_slot *memslot;
- unsigned long hva;
+ unsigned long hva, iss2;
bool is_iabt, write_fault, writable;
gfn_t gfn;
int ret, idx;
@@ -1650,6 +1653,14 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
+ iss2 = ESR_ELx_ISS2(kvm_vcpu_get_esr(vcpu));
+
+ /*
+	 * The HDBSS buffer was already flushed on entering
+	 * handle_trap_exceptions(), so there is nothing to do here.
+ */
+ if (fault_status == ESR_ELx_FSC_PERM && (iss2 & ESR_ELx_HDBSSF))
+ return 1;
if (fault_status == ESR_ELx_FSC_FAULT) {
/* Beyond sanitised PARange (which is the IPA limit) */
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 2d3df8c73158..78a61dc4640a 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -102,6 +102,7 @@ WORKAROUND_NVIDIA_CARMEL_CNP
WORKAROUND_QCOM_FALKOR_E1003
WORKAROUND_REPEAT_TLBI
WORKAROUND_SPECULATIVE_AT
+HAS_HDBSS
WORKAROUND_SPECULATIVE_UNPRIV_LOAD
WORKAROUND_HISILICON_ERRATUM_162100125
WORKAROUND_HISI_HIP08_RU_PREFETCH
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index 0e7d7f327410..2086217d2e56 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -1527,6 +1527,7 @@ UnsignedEnum 3:0 HAFDBS
0b0000 NI
0b0001 AF
0b0010 DBM
+ 0b0100 HDBSS
EndEnum
EndSysreg
@@ -2342,6 +2343,34 @@ Sysreg SMCR_EL2 3 4 1 2 6
Fields SMCR_ELx
EndSysreg
+Sysreg HDBSSBR_EL2 3 4 2 3 2
+Res0 63:56
+Field 55:12 BADDR
+Res0 11:4
+Enum 3:0 SZ
+ 0b0001 8KB
+ 0b0010 16KB
+ 0b0011 32KB
+ 0b0100 64KB
+ 0b0101 128KB
+ 0b0110 256KB
+ 0b0111 512KB
+ 0b1000 1MB
+ 0b1001 2MB
+EndEnum
+EndSysreg
+
+Sysreg HDBSSPROD_EL2 3 4 2 3 3
+Res0 63:32
+Enum 31:26 FSC
+ 0b000000 OK
+ 0b010000 ExternalAbort
+ 0b101000 GPF
+EndEnum
+Res0 25:19
+Field 18:0 INDEX
+EndSysreg
+
Sysreg DACR32_EL2 3 4 3 0 0
Res0 63:32
Field 31:30 D15
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 68f08c5267f6..9882290762d6 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1200,6 +1200,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_COUNTER_OFFSET 227
#define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228
#define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
+#define KVM_CAP_ARM_HW_DIRTY_STATE_TRACK 230
#define KVM_CAP_ARM_VIRT_MSI_BYPASS 799
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index f089ab290978..f31f40e117ae 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -1192,6 +1192,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_COUNTER_OFFSET 227
#define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228
#define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
+#define KVM_CAP_ARM_HW_DIRTY_STATE_TRACK 230
#ifdef KVM_CAP_IRQ_ROUTING
--
2.33.0