
From: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IC5EHB

-----------------------------------------

Add the xcu_group alloc/find/attach/detach function implementations.
Add the xsched_cu data structure and all related enumerators.
Add xsched_register_xcu() for drivers to register an XCU.
Add the XSCHED_NR_CUS config parameter.

Signed-off-by: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>
Signed-off-by: Hui Tang <tanghui20@huawei.com>
---
 drivers/xcu/xcu_group.c   | 127 ++++++++++++++++++++++++++++++++++++++
 include/linux/xcu_group.h |  29 +++++++++
 include/linux/xsched.h    |  41 ++++++++++++
 kernel/xsched/Kconfig     |   5 ++
 kernel/xsched/Makefile    |   1 +
 kernel/xsched/core.c      | 111 +++++++++++++++++++++++++++++++++
 6 files changed, 314 insertions(+)
 create mode 100644 kernel/xsched/core.c

diff --git a/drivers/xcu/xcu_group.c b/drivers/xcu/xcu_group.c
index 11bf0e54aaaa..a2419db384ac 100644
--- a/drivers/xcu/xcu_group.c
+++ b/drivers/xcu/xcu_group.c
@@ -16,7 +16,134 @@
  * more details.
  *
  */
+#include <linux/rwsem.h>
+#include <linux/slab.h>
 #include <linux/xcu_group.h>
+#include <linux/xsched.h>
+
+static DECLARE_RWSEM(xcu_group_rwsem);
+
+struct xcu_group *xcu_group_alloc(void)
+{
+	struct xcu_group *node = kzalloc(sizeof(*node), GFP_KERNEL);
+
+	if (!node)
+		return node;
+
+	node->type = XCU_TYPE_NPU;
+	idr_init(&node->next_layer);
+
+	return node;
+}
+EXPORT_SYMBOL(xcu_group_alloc);
+
+int __xcu_group_attach(struct xcu_group *new_group,
+		       struct xcu_group *previous_group)
+{
+	int id = new_group->id;
+
+	if (id == -1)
+		id = idr_alloc(&previous_group->next_layer, new_group, 0,
+			       INT_MAX, GFP_KERNEL);
+	else
+		id = idr_alloc(&previous_group->next_layer, new_group, id,
+			       id + 1, GFP_KERNEL);
+	if (id < 0) {
+		XSCHED_ERR("Attach xcu_group failed: id conflict @ %s\n",
+			   __func__);
+		return -EEXIST;
+	}
+
+	new_group->id = id;
+	new_group->previous_layer = previous_group;
+
+	return 0;
+}
+
+int xcu_group_attach(struct xcu_group *new_group,
+		     struct xcu_group *previous_group)
+{
+	int ret;
+
+	down_write(&xcu_group_rwsem);
+	ret = __xcu_group_attach(new_group, previous_group);
+	up_write(&xcu_group_rwsem);
+
+	return ret;
+}
+EXPORT_SYMBOL(xcu_group_attach);
+
+struct xcu_group *xcu_group_alloc_and_attach(struct xcu_group *previous_group,
+					     int id)
+{
+	struct xcu_group *new = xcu_group_alloc();
+
+	if (!new) {
+		XSCHED_ERR("Alloc xcu_group failed @ %s\n", __func__);
+		return NULL;
+	}
+	new->id = id;
+
+	if (xcu_group_attach(new, previous_group))
+		return NULL;
+
+	return new;
+}
+EXPORT_SYMBOL(xcu_group_alloc_and_attach);
+
+static inline int __xcu_group_detach(struct xcu_group *group)
+{
+	idr_remove(&group->previous_layer->next_layer, group->id);
+	return 0;
+}
+
+int xcu_group_detach(struct xcu_group *group)
+{
+	int ret;
+
+	down_write(&xcu_group_rwsem);
+	ret = __xcu_group_detach(group);
+	up_write(&xcu_group_rwsem);
+
+	return ret;
+}
+EXPORT_SYMBOL(xcu_group_detach);
+
+static struct xcu_group *__xcu_group_find_nolock(struct xcu_group *group,
+						 int id)
+{
+	return idr_find(&group->next_layer, id);
+}
+
+struct xcu_group *xcu_group_find_noalloc(struct xcu_group *group, int id)
+{
+	struct xcu_group *result;
+
+	down_read(&xcu_group_rwsem);
+	result = __xcu_group_find_nolock(group, id);
+	up_read(&xcu_group_rwsem);
+
+	return result;
+}
+EXPORT_SYMBOL(xcu_group_find_noalloc);
+
+struct xcu_group *xcu_group_find(struct xcu_group *group, int id)
+{
+	struct xcu_group *target_group;
+
+	down_read(&xcu_group_rwsem);
+	target_group = __xcu_group_find_nolock(group, id);
+	up_read(&xcu_group_rwsem);
+
+	if (!target_group) {
+		target_group = xcu_group_alloc();
+		target_group->type = id;
+		target_group->id = id;
+	}
+
+	return target_group;
+}
+EXPORT_SYMBOL(xcu_group_find);
 
 /* This function runs "run" callback for a given xcu_group
  * and a given vstream that are passed within
diff --git a/include/linux/xcu_group.h b/include/linux/xcu_group.h
index 36d80d8d7ee9..55facb0e5760 100644
--- a/include/linux/xcu_group.h
+++ b/include/linux/xcu_group.h
@@ -5,6 +5,11 @@
 #include <linux/idr.h>
 #include <uapi/linux/xcu_vstream.h>
 
+#ifndef CONFIG_XSCHED_NR_CUS
+#define CONFIG_XSCHED_NR_CUS 1
+#endif /* !CONFIG_XSCHED_NR_CUS */
+#define XSCHED_NR_CUS CONFIG_XSCHED_NR_CUS
+
 extern struct xcu_group *xcu_group_root;
 
 enum xcu_type {
@@ -12,6 +17,11 @@ enum xcu_type {
 	XCU_TYPE_NPU
 };
 
+enum xcu_version {
+	XCU_HW_V1,
+	XCU_HW_V2
+};
+
 struct xcu_op_handler_params {
 };
 
@@ -29,11 +39,30 @@ struct xcu_group {
 	/* sq id. */
 	uint32_t id;
 
+	/* Version of XCU group. */
+	enum xcu_version ver;
+
 	/* Type of XCU group. */
 	enum xcu_type type;
 
 	/* IDR for the next layer of XCU group tree. */
 	struct idr next_layer;
+
+	/* Pointer to the previous XCU group in the XCU group tree. */
+	struct xcu_group *previous_layer;
+
+	/* Pointer to operation fn pointers object describing
+	 * this XCU group's callbacks.
+	 */
+	struct xcu_operation *opt;
+
+	/* Pointer to the XCU related to this XCU group. */
+	struct xsched_cu *xcu;
+
+	/* Mask of XCU ids associated with this XCU group
+	 * and this group's children's XCUs.
+	 */
+	DECLARE_BITMAP(xcu_mask, XSCHED_NR_CUS);
 };
 
 #ifdef CONFIG_XCU_SCHEDULER
diff --git a/include/linux/xsched.h b/include/linux/xsched.h
index c2f56678dda4..bdac1653ae36 100644
--- a/include/linux/xsched.h
+++ b/include/linux/xsched.h
@@ -2,6 +2,7 @@
 #ifndef __LINUX_XSCHED_H__
 #define __LINUX_XSCHED_H__
 
+#include <linux/xcu_group.h>
 #ifndef pr_fmt
 #define pr_fmt(fmt) fmt
 #endif
@@ -48,4 +49,44 @@
 #define __XSCHED_TRACE(fmt, ...)
 #endif
 
+enum xcu_state {
+	XCU_INACTIVE,
+	XCU_IDLE,
+	XCU_BUSY,
+	XCU_SUBMIT,
+};
+
+enum xsched_cu_status {
+	/* Worker not initialized. */
+	XSCHED_XCU_NONE,
+
+	/* Worker is sleeping in idle state. */
+	XSCHED_XCU_WAIT_IDLE,
+
+	/* Worker is sleeping in running state. */
+	XSCHED_XCU_WAIT_RUNNING,
+
+	/* Worker is active but not processing anything. */
+	XSCHED_XCU_ACTIVE,
+
+	NR_XSCHED_XCU_STATUS
+};
+
+/* This is the abstraction object of the xcu computing unit. */
+struct xsched_cu {
+	uint32_t id;
+	uint32_t state;
+
+	struct task_struct *worker;
+
+	struct xcu_group *group;
+
+	struct mutex xcu_lock;
+
+	wait_queue_head_t wq_xcu_idle;
+	wait_queue_head_t wq_xcu_running;
+	wait_queue_head_t wq_xcore_running;
+};
+
+int xsched_register_xcu(struct xcu_group *group);
 #endif /* !__LINUX_XSCHED_H__ */
diff --git a/kernel/xsched/Kconfig b/kernel/xsched/Kconfig
index cbe937a90d80..cbd2eec8bfad 100644
--- a/kernel/xsched/Kconfig
+++ b/kernel/xsched/Kconfig
@@ -36,3 +36,8 @@ config XSCHED_DEBUG_PRINTS
 	  analyzing task scheduling behavior, and tracking internal state
 	  changes. Enabling this may impact performance due to increased
 	  log output. If unsure, say N.
+
+config XSCHED_NR_CUS
+	int "Number of CUs (a.k.a. XCUs) available to the XSched mechanism"
+	default 8
+	depends on XCU_SCHEDULER
diff --git a/kernel/xsched/Makefile b/kernel/xsched/Makefile
index e972cd93b607..62e58e4151b0 100644
--- a/kernel/xsched/Makefile
+++ b/kernel/xsched/Makefile
@@ -1,2 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-y += vstream.o
+obj-$(CONFIG_XCU_SCHEDULER) += core.o
diff --git a/kernel/xsched/core.c b/kernel/xsched/core.c
new file mode 100644
index 000000000000..96a814275963
--- /dev/null
+++ b/kernel/xsched/core.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Core kernel scheduler code for XPU device
+ *
+ * Copyright (C) 2025-2026 Huawei Technologies Co., Ltd
+ *
+ * Author: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/xsched.h>
+#include <uapi/linux/sched/types.h>
+
+int num_active_xcu;
+DEFINE_SPINLOCK(xcu_mgr_lock);
+
+/* Xsched XCU array and bitmask that represents which XCUs
+ * are present and online.
+ */
+DECLARE_BITMAP(xcu_online_mask, XSCHED_NR_CUS);
+struct xsched_cu *xsched_cu_mgr[XSCHED_NR_CUS];
+
+static int xsched_schedule(void *input_xcu)
+{
+	return 0;
+}
+
+/* Initializes an xsched XCU object.
+ * Should only be called from xsched_register_xcu function.
+ */
+static void xsched_xcu_init(struct xsched_cu *xcu, struct xcu_group *group,
+			    int xcu_id)
+{
+	bitmap_clear(xcu_group_root->xcu_mask, 0, XSCHED_NR_CUS);
+
+	xcu->id = xcu_id;
+	xcu->state = XSCHED_XCU_NONE;
+	xcu->group = group;
+
+	mutex_init(&xcu->xcu_lock);
+
+	/* Mark current XCU in a mask inside XCU root group. */
+	set_bit(xcu->id, xcu_group_root->xcu_mask);
+
+	/* This worker should set XCU to XSCHED_XCU_WAIT_IDLE.
+	 * If after initialization XCU still has XSCHED_XCU_NONE
+	 * status then we can assume that there was a problem
+	 * with XCU kthread job.
+	 */
+	xcu->worker = kthread_run(xsched_schedule, xcu, "xcu_%u", xcu->id);
+}
+
+/* Allocates xcu id in xcu_manager array. */
+static int alloc_xcu_id(void)
+{
+	int xcu_id = -1;
+
+	spin_lock(&xcu_mgr_lock);
+	if (num_active_xcu >= XSCHED_NR_CUS)
+		goto out_unlock;
+
+	xcu_id = num_active_xcu;
+	num_active_xcu++;
+	XSCHED_INFO("Number of active xcus: %d.\n", num_active_xcu);
+
+out_unlock:
+	spin_unlock(&xcu_mgr_lock);
+	return xcu_id;
+}
+
+/*
+ * Initialize and register xcu in xcu_manager array.
+ */
+int xsched_register_xcu(struct xcu_group *group)
+{
+	int xcu_id;
+	struct xsched_cu *xcu;
+
+	xcu_id = alloc_xcu_id();
+	if (xcu_id < 0) {
+		XSCHED_ERR("Alloc xcu_id failed.\n");
+		return -1;
+	}
+
+	xcu = kzalloc(sizeof(struct xsched_cu), GFP_KERNEL);
+	if (!xcu) {
+		XSCHED_ERR("Alloc xcu structure failed.\n");
+		return -1;
+	}
+
+	group->xcu = xcu;
+	xsched_cu_mgr[xcu_id] = xcu;
+
+	/* Init xcu's internals. */
+	xsched_xcu_init(xcu, group, xcu_id);
+
+	return 0;
+}
+EXPORT_SYMBOL(xsched_register_xcu);
-- 
2.34.1
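
Not part of the patch above: for reviewers, a minimal usage sketch of the new
interfaces as they might appear in a device driver's probe path. The
demo_xcu_probe() name and the two-layer topology are hypothetical; only
xcu_group_alloc(), xcu_group_attach(), xcu_group_alloc_and_attach(),
xsched_register_xcu() and the xcu_group_root declaration come from this
series, and error unwinding is abbreviated.

/* Hypothetical driver-side sketch, not part of this patch. */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/xcu_group.h>
#include <linux/xsched.h>

static int demo_xcu_probe(void)
{
	struct xcu_group *dev_group, *sq_group;
	int ret;

	/* First layer: a per-device group under the root group.
	 * id == -1 asks __xcu_group_attach() to pick a free IDR slot.
	 */
	dev_group = xcu_group_alloc();
	if (!dev_group)
		return -ENOMEM;
	dev_group->id = -1;

	ret = xcu_group_attach(dev_group, xcu_group_root);
	if (ret) {
		kfree(dev_group);
		return ret;
	}

	/* Second layer: one group per submission queue, fixed id 0. */
	sq_group = xcu_group_alloc_and_attach(dev_group, 0);
	if (!sq_group)
		return -ENOMEM;

	/* Create the scheduler-side xsched_cu and its worker kthread. */
	return xsched_register_xcu(sq_group);
}

Setting id to -1 before xcu_group_attach() requests dynamic id allocation
from the IDR, while xcu_group_alloc_and_attach() is the shorthand for layers
whose ids are fixed by the hardware topology.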