This patch adds a new mmap mode to libhns. Subsequent features that need to add new mmap types will be implemented on top of this mode.
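The idea, in short: userspace opts in at alloc_ucontext time, the kernel returns an opaque key in the response, and the driver passes that key straight to mmap() instead of computing a fixed page offset. A minimal userspace sketch of the idea (names mirror the patch below; error handling and the legacy fallback are omitted):

	#include <stdint.h>
	#include <sys/mman.h>

	/*
	 * Sketch only: map the doorbell page with the kernel-provided key
	 * (db_mmap_key from hns_roce_ib_alloc_ucontext_resp) instead of a
	 * hard-coded offset. cmd_fd and page_size come from the context.
	 */
	static void *map_uar(int cmd_fd, size_t page_size, uint64_t db_mmap_key)
	{
		return mmap(NULL, page_size, PROT_READ | PROT_WRITE,
			    MAP_SHARED, cmd_fd, db_mmap_key);
	}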
Chengchang Tang (1):
  libhns: Add a new mmap mode

 kernel-headers/rdma/hns-abi.h  | 21 ++++++++++-
 providers/hns/hns_roce_u.c     | 86 ++++++++++++++++++++++++++++++------------
 providers/hns/hns_roce_u.h     |  1 +
 providers/hns/hns_roce_u_abi.h |  2 +-
 4 files changed, 84 insertions(+), 26 deletions(-)

--
2.9.5
Add support for a new mmap mode to hns. In this mode, the userspace driver completes the mmap using an offset provided by the kernel driver instead of a hard-coded offset.
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
---
 kernel-headers/rdma/hns-abi.h  | 21 ++++++++++-
 providers/hns/hns_roce_u.c     | 86 ++++++++++++++++++++++++++++++------------
 providers/hns/hns_roce_u.h     |  1 +
 providers/hns/hns_roce_u_abi.h |  2 +-
 4 files changed, 84 insertions(+), 26 deletions(-)
diff --git a/kernel-headers/rdma/hns-abi.h b/kernel-headers/rdma/hns-abi.h
index 42b1776..eabdfd4 100644
--- a/kernel-headers/rdma/hns-abi.h
+++ b/kernel-headers/rdma/hns-abi.h
@@ -83,11 +83,30 @@ struct hns_roce_ib_create_qp_resp {
 	__aligned_u64 cap_flags;
 };
 
+enum {
+	HNS_ROCE_ALLOC_UCTX_COMP_CONFIG = 1 << 0,
+};
+
+enum {
+	HNS_ROCE_UCTX_CONFIG_MMAP_KEY = 1 << 0,
+};
+
+enum {
+	HNS_ROCE_UCTX_SET_MMAP_KEY = 1 << 0,
+};
+
+struct hns_roce_ib_alloc_ucontext {
+	__u32 comp;
+	__u32 config;
+};
+
 struct hns_roce_ib_alloc_ucontext_resp {
 	__u32	qp_tab_size;
 	__u32	cqe_size;
 	__u32	srq_tab_size;
-	__u32	reserved;
+	__u8	config;
+	__u8	rsv[3];
+	__aligned_u64 db_mmap_key;
 };
 
 struct hns_roce_ib_alloc_pd_resp {
diff --git a/providers/hns/hns_roce_u.c b/providers/hns/hns_roce_u.c
index 3b31ad3..bdf2fff 100644
--- a/providers/hns/hns_roce_u.c
+++ b/providers/hns/hns_roce_u.c
@@ -95,16 +95,67 @@ static const struct verbs_context_ops hns_common_ops = {
 	.get_srq_num = hns_roce_u_get_srq_num,
 };
 
+static int hns_roce_mmap(struct hns_roce_device *hr_dev,
+			 struct hns_roce_context *context, int cmd_fd,
+			 uint64_t db_mmap_key)
+{
+	int page_size = hr_dev->page_size;
+
+	context->uar = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
+			    MAP_SHARED, cmd_fd, db_mmap_key);
+	if (context->uar == MAP_FAILED)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int hns_roce_legacy_mmap(struct hns_roce_device *hr_dev,
+				struct hns_roce_context *context, int cmd_fd)
+{
+	int offset = 0;
+
+	context->uar = mmap(NULL, hr_dev->page_size, PROT_READ | PROT_WRITE,
+			    MAP_SHARED, cmd_fd, offset);
+	if (context->uar == MAP_FAILED)
+		return -EINVAL;
+
+	offset += hr_dev->page_size;
+
+	if (hr_dev->hw_version == HNS_ROCE_HW_VER1) {
+		/*
+		 * when vma->vm_pgoff is 1, the cq_tptr_base includes 64K CQ,
+		 * a pointer of CQ need 2B size
+		 */
+		context->cq_tptr_base = mmap(NULL, HNS_ROCE_CQ_DB_BUF_SIZE,
+					     PROT_READ | PROT_WRITE, MAP_SHARED,
+					     cmd_fd, offset);
+		if (context->cq_tptr_base == MAP_FAILED)
+			goto db_free;
+	}
+
+	return 0;
+
+db_free:
+	munmap(context->uar, hr_dev->page_size);
+	context->uar = NULL;
+	return -EINVAL;
+}
+
+static void ucontext_set_cmd(struct hns_roce_alloc_ucontext *cmd)
+{
+	cmd->comp = HNS_ROCE_ALLOC_UCTX_COMP_CONFIG;
+	cmd->config = HNS_ROCE_UCTX_SET_MMAP_KEY;
+}
+
 static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
 						    int cmd_fd,
 						    void *private_data)
 {
 	struct hns_roce_device *hr_dev = to_hr_dev(ibdev);
 	struct hns_roce_alloc_ucontext_resp resp = {};
+	struct hns_roce_alloc_ucontext cmd = {};
 	struct ibv_device_attr dev_attrs;
 	struct hns_roce_context *context;
-	struct ibv_get_context cmd;
-	int offset = 0;
+	int ret;
 	int i;
 
 	context = verbs_init_and_alloc_context(ibdev, cmd_fd, context, ibv_ctx,
@@ -112,10 +163,13 @@ static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
 	if (!context)
 		return NULL;
 
-	if (ibv_cmd_get_context(&context->ibv_ctx, &cmd, sizeof(cmd),
+	ucontext_set_cmd(&cmd);
+	if (ibv_cmd_get_context(&context->ibv_ctx, &cmd.ibv_cmd, sizeof(cmd),
 				&resp.ibv_resp, sizeof(resp)))
 		goto err_free;
 
+	context->mmap_key_support = resp.config & HNS_ROCE_UCTX_CONFIG_MMAP_KEY;
+
 	if (!resp.cqe_size)
 		context->cqe_size = HNS_ROCE_CQE_SIZE;
 	else if (resp.cqe_size <= HNS_ROCE_V3_CQE_SIZE)
@@ -153,26 +207,14 @@ static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
 	context->max_cqe = dev_attrs.max_cqe;
 	context->max_srq_wr = dev_attrs.max_srq_wr;
 	context->max_srq_sge = dev_attrs.max_srq_sge;
 
+	if (context->mmap_key_support)
+		ret = hns_roce_mmap(hr_dev, context, cmd_fd, resp.db_mmap_key);
+	else
+		ret = hns_roce_legacy_mmap(hr_dev, context, cmd_fd);
 
-	context->uar = mmap(NULL, hr_dev->page_size, PROT_READ | PROT_WRITE,
-			    MAP_SHARED, cmd_fd, offset);
-	if (context->uar == MAP_FAILED)
+	if (ret)
 		goto err_free;
 
-	offset += hr_dev->page_size;
-
-	if (hr_dev->hw_version == HNS_ROCE_HW_VER1) {
-		/*
-		 * when vma->vm_pgoff is 1, the cq_tptr_base includes 64K CQ,
-		 * a pointer of CQ need 2B size
-		 */
-		context->cq_tptr_base = mmap(NULL, HNS_ROCE_CQ_DB_BUF_SIZE,
-					     PROT_READ | PROT_WRITE, MAP_SHARED,
-					     cmd_fd, offset);
-		if (context->cq_tptr_base == MAP_FAILED)
-			goto db_free;
-	}
-
 	pthread_spin_init(&context->uar_lock, PTHREAD_PROCESS_PRIVATE);
 
 	verbs_set_ops(&context->ibv_ctx, &hns_common_ops);
@@ -180,10 +222,6 @@ static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
 
 	return &context->ibv_ctx;
 
-db_free:
-	munmap(context->uar, hr_dev->page_size);
-	context->uar = NULL;
-
 err_free:
 	verbs_uninit_context(&context->ibv_ctx);
 	free(context);
diff --git a/providers/hns/hns_roce_u.h b/providers/hns/hns_roce_u.h
index 0d7abd8..126c9d3 100644
--- a/providers/hns/hns_roce_u.h
+++ b/providers/hns/hns_roce_u.h
@@ -180,6 +180,7 @@ struct hns_roce_context {
 	unsigned int		max_srq_sge;
 	int			max_cqe;
 	unsigned int		cqe_size;
+	bool			mmap_key_support;
 };
 
 struct hns_roce_pd {
diff --git a/providers/hns/hns_roce_u_abi.h b/providers/hns/hns_roce_u_abi.h
index e56f9d3..295808b 100644
--- a/providers/hns/hns_roce_u_abi.h
+++ b/providers/hns/hns_roce_u_abi.h
@@ -42,7 +42,7 @@ DECLARE_DRV_CMD(hns_roce_alloc_pd, IB_USER_VERBS_CMD_ALLOC_PD,
 DECLARE_DRV_CMD(hns_roce_create_cq, IB_USER_VERBS_CMD_CREATE_CQ,
 		hns_roce_ib_create_cq, hns_roce_ib_create_cq_resp);
 DECLARE_DRV_CMD(hns_roce_alloc_ucontext, IB_USER_VERBS_CMD_GET_CONTEXT,
-		empty, hns_roce_ib_alloc_ucontext_resp);
+		hns_roce_ib_alloc_ucontext, hns_roce_ib_alloc_ucontext_resp);
 
 DECLARE_DRV_CMD(hns_roce_create_qp, IB_USER_VERBS_CMD_CREATE_QP,
 		hns_roce_ib_create_qp, hns_roce_ib_create_qp_resp);
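Note: the kernel code that mints db_mmap_key is not part of this series. A common pattern for such keys (an assumption here, not taken from these patches) is the core rdma_user_mmap_entry API, roughly:

	/*
	 * Hypothetical kernel-side sketch: register a one-page mmap entry
	 * and return the byte offset userspace must pass to mmap(). The
	 * struct and function names are illustrative only.
	 */
	struct hns_user_mmap_entry {
		struct rdma_user_mmap_entry rdma_entry;	/* embedded core entry */
		u64 address;				/* page to expose, e.g. the UAR */
	};

	static u64 alloc_db_mmap_key(struct ib_ucontext *uctx,
				     struct hns_user_mmap_entry *entry)
	{
		/* The core allocates a unique pgoff range for this entry. */
		if (rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry, PAGE_SIZE))
			return 0;

		/* This offset is the opaque key returned to userspace. */
		return rdma_user_mmap_get_offset(&entry->rdma_entry);
	}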
From: Wenpeng Liang <liangwenpeng@huawei.com>
To commit ?? ("RDMA/hns: Dump detailed driver-specific UCTX").
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
---
 kernel-headers/rdma/hns-abi.h  | 86 ++++++++++++++++++++++++++++++++++++++++++
 kernel-headers/rdma/mlx5-abi.h | 17 ++++++++-
 2 files changed, 101 insertions(+), 2 deletions(-)
diff --git a/kernel-headers/rdma/hns-abi.h b/kernel-headers/rdma/hns-abi.h
index 42b1776..40ac2c3 100644
--- a/kernel-headers/rdma/hns-abi.h
+++ b/kernel-headers/rdma/hns-abi.h
@@ -77,21 +77,107 @@ enum hns_roce_qp_cap_flags {
 	HNS_ROCE_QP_CAP_RQ_RECORD_DB = 1 << 0,
 	HNS_ROCE_QP_CAP_SQ_RECORD_DB = 1 << 1,
 	HNS_ROCE_QP_CAP_OWNER_DB = 1 << 2,
+	HNS_ROCE_QP_CAP_DYNAMIC_CTX_ATTACH = 1 << 4,
+	HNS_ROCE_QP_CAP_DYNAMIC_CTX_DETACH = 1 << 6,
 };
 
 struct hns_roce_ib_create_qp_resp {
 	__aligned_u64 cap_flags;
 };
 
+enum {
+	HNS_ROCE_ALLOC_UCTX_COMP_DCA_MAX_QPS = 1 << 0,
+};
+
+struct hns_roce_ib_alloc_ucontext {
+	__u32 comp;
+	__u32 dca_max_qps;
+};
+
+enum {
+	HNS_ROCE_CAP_FLAG_DCA_MODE = 1 << 15,
+};
+
 struct hns_roce_ib_alloc_ucontext_resp {
 	__u32	qp_tab_size;
 	__u32	cqe_size;
 	__u32	srq_tab_size;
 	__u32	reserved;
+	__aligned_u64 cap_flags;
+	__u32	dca_qps;
+	__u32	dca_mmap_size;
 };
 
 struct hns_roce_ib_alloc_pd_resp {
 	__u32 pdn;
 };
 
+enum {
+	HNS_ROCE_MMAP_REGULAR_PAGE,
+	HNS_ROCE_MMAP_DCA_PAGE,
+};
+
+struct hns_roce_ib_modify_qp_resp {
+	__u32	dcan;
+	__u32	reserved;
+};
+
+#define UVERBS_ID_NS_MASK	0xF000
+#define UVERBS_ID_NS_SHIFT	12
+
+enum hns_ib_objects {
+	HNS_IB_OBJECT_DCA_MEM = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum hns_ib_dca_mem_methods {
+	HNS_IB_METHOD_DCA_MEM_REG = (1U << UVERBS_ID_NS_SHIFT),
+	HNS_IB_METHOD_DCA_MEM_DEREG,
+	HNS_IB_METHOD_DCA_MEM_SHRINK,
+	HNS_IB_METHOD_DCA_MEM_ATTACH,
+	HNS_IB_METHOD_DCA_MEM_DETACH,
+	HNS_IB_METHOD_DCA_MEM_QUERY,
+};
+
+enum hns_ib_dca_mem_reg_attrs {
+	HNS_IB_ATTR_DCA_MEM_REG_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+	HNS_IB_ATTR_DCA_MEM_REG_LEN,
+	HNS_IB_ATTR_DCA_MEM_REG_ADDR,
+	HNS_IB_ATTR_DCA_MEM_REG_KEY,
+};
+
+enum hns_ib_dca_mem_dereg_attrs {
+	HNS_IB_ATTR_DCA_MEM_DEREG_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum hns_ib_dca_mem_shrink_attrs {
+	HNS_IB_ATTR_DCA_MEM_SHRINK_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+	HNS_IB_ATTR_DCA_MEM_SHRINK_RESERVED_SIZE,
+	HNS_IB_ATTR_DCA_MEM_SHRINK_OUT_FREE_KEY,
+	HNS_IB_ATTR_DCA_MEM_SHRINK_OUT_FREE_MEMS,
+};
+
+#define HNS_IB_ATTACH_FLAGS_NEW_BUFFER	1U
+
+enum hns_ib_dca_mem_attach_attrs {
+	HNS_IB_ATTR_DCA_MEM_ATTACH_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+	HNS_IB_ATTR_DCA_MEM_ATTACH_SQ_OFFSET,
+	HNS_IB_ATTR_DCA_MEM_ATTACH_SGE_OFFSET,
+	HNS_IB_ATTR_DCA_MEM_ATTACH_RQ_OFFSET,
+	HNS_IB_ATTR_DCA_MEM_ATTACH_OUT_ALLOC_FLAGS,
+	HNS_IB_ATTR_DCA_MEM_ATTACH_OUT_ALLOC_PAGES,
+};
+
+enum hns_ib_dca_mem_detach_attrs {
+	HNS_IB_ATTR_DCA_MEM_DETACH_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+	HNS_IB_ATTR_DCA_MEM_DETACH_SQ_INDEX,
+};
+
+enum hns_ib_dca_mem_query_attrs {
+	HNS_IB_ATTR_DCA_MEM_QUERY_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+	HNS_IB_ATTR_DCA_MEM_QUERY_PAGE_INDEX,
+	HNS_IB_ATTR_DCA_MEM_QUERY_OUT_KEY,
+	HNS_IB_ATTR_DCA_MEM_QUERY_OUT_OFFSET,
+	HNS_IB_ATTR_DCA_MEM_QUERY_OUT_PAGE_COUNT,
+};
+
 #endif /* HNS_ABI_USER_H */
diff --git a/kernel-headers/rdma/mlx5-abi.h b/kernel-headers/rdma/mlx5-abi.h
index 8597e6f..86be4a9 100644
--- a/kernel-headers/rdma/mlx5-abi.h
+++ b/kernel-headers/rdma/mlx5-abi.h
@@ -50,6 +50,7 @@ enum {
 	MLX5_QP_FLAG_ALLOW_SCATTER_CQE	= 1 << 8,
 	MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE	= 1 << 9,
 	MLX5_QP_FLAG_UAR_PAGE_INDEX = 1 << 10,
+	MLX5_QP_FLAG_DCI_STREAM = 1 << 11,
 };
 
 enum {
@@ -238,6 +239,11 @@ struct mlx5_ib_striding_rq_caps {
 	__u32 reserved;
 };
 
+struct mlx5_ib_dci_streams_caps {
+	__u8 max_log_num_concurent;
+	__u8 max_log_num_errored;
+};
+
 enum mlx5_ib_query_dev_resp_flags {
 	/* Support 128B CQE compression */
 	MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0,
@@ -266,7 +272,8 @@ struct mlx5_ib_query_device_resp {
 	struct mlx5_ib_sw_parsing_caps sw_parsing_caps;
 	struct mlx5_ib_striding_rq_caps striding_rq_caps;
 	__u32	tunnel_offloads_caps; /* enum mlx5_ib_tunnel_offloads */
-	__u32	reserved;
+	struct mlx5_ib_dci_streams_caps dci_streams_caps;
+	__u16 reserved;
 };
 
 enum mlx5_ib_create_cq_flags {
@@ -313,6 +320,11 @@ struct mlx5_ib_create_srq_resp {
 	__u32	reserved;
 };
 
+struct mlx5_ib_create_qp_dci_streams {
+	__u8 log_num_concurent;
+	__u8 log_num_errored;
+};
+
 struct mlx5_ib_create_qp {
 	__aligned_u64 buf_addr;
 	__aligned_u64 db_addr;
@@ -327,7 +339,8 @@ struct mlx5_ib_create_qp {
 		__aligned_u64 access_key;
 	};
 	__u32 ece_options;
-	__u32 reserved;
+	struct mlx5_ib_create_qp_dci_streams dci_streams;
+	__u16 reserved;
 };
 
 /* RX Hash function flags */
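A note on the ID values above: driver-private uverbs objects, methods and attributes are numbered in their own namespace, starting at 1 << UVERBS_ID_NS_SHIFT, with later enumerators simply incrementing from there, so they never collide with core verbs IDs. A tiny self-contained illustration (demo names, not the real ABI):

	#include <stdio.h>

	#define UVERBS_ID_NS_SHIFT 12

	/* The first entry opens the namespace at 0x1000; the rest follow. */
	enum demo_dca_mem_methods {
		DEMO_DCA_MEM_REG = (1U << UVERBS_ID_NS_SHIFT),	/* 0x1000 */
		DEMO_DCA_MEM_DEREG,				/* 0x1001 */
		DEMO_DCA_MEM_SHRINK,				/* 0x1002 */
	};

	int main(void)
	{
		printf("REG=0x%x DEREG=0x%x SHRINK=0x%x\n",
		       DEMO_DCA_MEM_REG, DEMO_DCA_MEM_DEREG, DEMO_DCA_MEM_SHRINK);
		return 0;
	}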
This email was sent by mistake, please ignore it.
On 2021/9/16 18:46, Chengchang Tang wrote:
> From: Wenpeng Liang <liangwenpeng@huawei.com>
>
> To commit ?? ("RDMA/hns: Dump detailed driver-specific UCTX").
>
> [...]