The ____cacheline_aligned_in_smp macro, which is used by the ptr_ring lib, is needed to avoid cacheline bouncing on SMP systems.
So add the related macro in order to build ptr_ring from user space.
As SMP_CACHE_BYTES is 64 bytes on arm64 and most x86 systems, use 64 bytes as the default SMP_CACHE_BYTES if SMP_CACHE_BYTES is not defined.
Signed-off-by: Yunsheng Lin linyunsheng@huawei.com --- tools/include/linux/cache.h | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 tools/include/linux/cache.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __TOOLS_LINUX__CACHE_H
#define __TOOLS_LINUX__CACHE_H

/*
 * Userspace builds of ptr_ring need the kernel's cacheline-alignment
 * helpers.  Default to SMP, and fall back to a 64-byte cacheline,
 * which matches arm64 and most x86 systems, when the build does not
 * define SMP_CACHE_BYTES itself.
 */
#ifndef CONFIG_SMP
#define CONFIG_SMP 1
#endif

#ifndef SMP_CACHE_BYTES
#define SMP_CACHE_BYTES 64
#endif

/* Align an object to a cacheline boundary to avoid false sharing. */
#ifndef ____cacheline_aligned
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#endif

/* Only pay the alignment/padding cost when building for SMP. */
#ifndef ____cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define ____cacheline_aligned_in_smp ____cacheline_aligned
#else
#define ____cacheline_aligned_in_smp
#endif /* CONFIG_SMP */
#endif

#endif /* __TOOLS_LINUX__CACHE_H */