From: Wen Gu <guwen@linux.alibaba.com>
mainline inclusion
from mainline-v6.10-rc1
commit c3a910f2380fe294d14e42af66af3d3eed8fecbf
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IACM52
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
This implements operations related to merging sndbuf with peer DMB in loopback-ism. The DMB won't be freed until no sndbuf is attached to it.
Signed-off-by: Wen Gu <guwen@linux.alibaba.com>
Reviewed-by: Wenjia Zhang <wenjia@linux.ibm.com>
Reviewed-and-tested-by: Jan Karcher <jaka@linux.ibm.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
---
 net/smc/smc_loopback.c | 120 +++++++++++++++++++++++++++++++++++------
 net/smc/smc_loopback.h |   3 ++
 2 files changed, 108 insertions(+), 15 deletions(-)
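Note (not part of the patch, ignored by git am): the lifetime rule the diff below implements is a plain refcounting scheme. A registered DMB starts with one reference held by its owner; each sndbuf attached to it takes another; the backing memory is freed only after the owner has unregistered and the last sndbuf has detached. The stand-alone userspace C sketch below models only that rule; the lo_dmb/lo_register/lo_attach/lo_detach/lo_unregister names are made up for illustration and do not exist in the kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for struct smc_lo_dmb_node: one reference is
 * held by the registering owner, plus one per attached sndbuf.
 */
struct lo_dmb {
	int refcnt;
	void *cpu_addr;
	size_t len;
};

static struct lo_dmb *lo_register(size_t len)
{
	struct lo_dmb *dmb = calloc(1, sizeof(*dmb));

	if (!dmb)
		return NULL;
	dmb->cpu_addr = calloc(1, len);
	if (!dmb->cpu_addr) {
		free(dmb);
		return NULL;
	}
	dmb->len = len;
	dmb->refcnt = 1;		/* owner's reference */
	return dmb;
}

static void lo_put(struct lo_dmb *dmb)
{
	if (--dmb->refcnt == 0) {	/* last user frees the memory */
		free(dmb->cpu_addr);
		free(dmb);
	}
}

static void lo_attach(struct lo_dmb *dmb)	{ dmb->refcnt++; }	/* sndbuf merged with DMB */
static void lo_detach(struct lo_dmb *dmb)	{ lo_put(dmb); }	/* sndbuf released */
static void lo_unregister(struct lo_dmb *dmb)	{ lo_put(dmb); }	/* owner unregisters */

int main(void)
{
	struct lo_dmb *dmb = lo_register(4096);

	if (!dmb)
		return 1;
	lo_attach(dmb);		/* peer merges its sndbuf with the DMB */
	lo_unregister(dmb);	/* owner gone, but memory must survive */
	strcpy(dmb->cpu_addr, "sndbuf still maps valid memory");
	printf("%s\n", (char *)dmb->cpu_addr);
	lo_detach(dmb);		/* last reference dropped, now freed */
	return 0;
}

In the patch itself the same idea is expressed with refcount_t on the DMB node, plus a per-device dmb_cnt and waitqueue so that smc_lo_dev_exit() can wait until the last DMB is gone.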
diff --git a/net/smc/smc_loopback.c b/net/smc/smc_loopback.c
index 94a57f4ee3f9..3c5f64ca4115 100644
--- a/net/smc/smc_loopback.c
+++ b/net/smc/smc_loopback.c
@@ -20,6 +20,7 @@
 #include "smc_loopback.h"

 #define SMC_LO_V2_CAPABLE	0x1 /* loopback-ism acts as ISMv2 */
+#define SMC_LO_SUPPORT_NOCOPY	0x1
 #define SMC_DMA_ADDR_INVALID	(~(dma_addr_t)0)

 static const char smc_lo_dev_name[] = "loopback-ism";
@@ -81,6 +82,7 @@ static int smc_lo_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
 		goto err_node;
 	}
 	dmb_node->dma_addr = SMC_DMA_ADDR_INVALID;
+	refcount_set(&dmb_node->refcnt, 1);

 again:
 	/* add new dmb into hash table */
@@ -94,6 +96,7 @@ static int smc_lo_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
 	}
 	hash_add(ldev->dmb_ht, &dmb_node->list, dmb_node->token);
 	write_unlock_bh(&ldev->dmb_ht_lock);
+	atomic_inc(&ldev->dmb_cnt);

 	dmb->sba_idx = dmb_node->sba_idx;
 	dmb->dmb_tok = dmb_node->token;
@@ -110,13 +113,29 @@ static int smc_lo_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
 	return rc;
 }

+static void __smc_lo_unregister_dmb(struct smc_lo_dev *ldev,
+				    struct smc_lo_dmb_node *dmb_node)
+{
+	/* remove dmb from hash table */
+	write_lock_bh(&ldev->dmb_ht_lock);
+	hash_del(&dmb_node->list);
+	write_unlock_bh(&ldev->dmb_ht_lock);
+
+	clear_bit(dmb_node->sba_idx, ldev->sba_idx_mask);
+	kvfree(dmb_node->cpu_addr);
+	kfree(dmb_node);
+
+	if (atomic_dec_and_test(&ldev->dmb_cnt))
+		wake_up(&ldev->ldev_release);
+}
+
 static int smc_lo_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
 {
 	struct smc_lo_dmb_node *dmb_node = NULL, *tmp_node;
 	struct smc_lo_dev *ldev = smcd->priv;

-	/* remove dmb from hash table */
-	write_lock_bh(&ldev->dmb_ht_lock);
+	/* find dmb from hash table */
+	read_lock_bh(&ldev->dmb_ht_lock);
 	hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
 		if (tmp_node->token == dmb->dmb_tok) {
 			dmb_node = tmp_node;
@@ -124,16 +143,76 @@ static int smc_lo_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
 		}
 	}
 	if (!dmb_node) {
-		write_unlock_bh(&ldev->dmb_ht_lock);
+		read_unlock_bh(&ldev->dmb_ht_lock);
 		return -EINVAL;
 	}
-	hash_del(&dmb_node->list);
-	write_unlock_bh(&ldev->dmb_ht_lock);
+	read_unlock_bh(&ldev->dmb_ht_lock);

-	clear_bit(dmb_node->sba_idx, ldev->sba_idx_mask);
-	kfree(dmb_node->cpu_addr);
-	kfree(dmb_node);
+	if (refcount_dec_and_test(&dmb_node->refcnt))
+		__smc_lo_unregister_dmb(ldev, dmb_node);
+	return 0;
+}
+
+static int smc_lo_support_dmb_nocopy(struct smcd_dev *smcd)
+{
+	return SMC_LO_SUPPORT_NOCOPY;
+}
+
+static int smc_lo_attach_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
+{
+	struct smc_lo_dmb_node *dmb_node = NULL, *tmp_node;
+	struct smc_lo_dev *ldev = smcd->priv;
+
+	/* find dmb_node according to dmb->dmb_tok */
+	read_lock_bh(&ldev->dmb_ht_lock);
+	hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
+		if (tmp_node->token == dmb->dmb_tok) {
+			dmb_node = tmp_node;
+			break;
+		}
+	}
+	if (!dmb_node) {
+		read_unlock_bh(&ldev->dmb_ht_lock);
+		return -EINVAL;
+	}
+	read_unlock_bh(&ldev->dmb_ht_lock);
+
+	if (!refcount_inc_not_zero(&dmb_node->refcnt))
+		/* the dmb is being unregistered, but has
+		 * not been removed from the hash table.
+		 */
+		return -EINVAL;
+
+	/* provide dmb information */
+	dmb->sba_idx = dmb_node->sba_idx;
+	dmb->dmb_tok = dmb_node->token;
+	dmb->cpu_addr = dmb_node->cpu_addr;
+	dmb->dma_addr = dmb_node->dma_addr;
+	dmb->dmb_len = dmb_node->len;
+	return 0;
+}
+
+static int smc_lo_detach_dmb(struct smcd_dev *smcd, u64 token)
+{
+	struct smc_lo_dmb_node *dmb_node = NULL, *tmp_node;
+	struct smc_lo_dev *ldev = smcd->priv;
+
+	/* find dmb_node according to dmb->dmb_tok */
+	read_lock_bh(&ldev->dmb_ht_lock);
+	hash_for_each_possible(ldev->dmb_ht, tmp_node, list, token) {
+		if (tmp_node->token == token) {
+			dmb_node = tmp_node;
+			break;
+		}
+	}
+	if (!dmb_node) {
+		read_unlock_bh(&ldev->dmb_ht_lock);
+		return -EINVAL;
+	}
+	read_unlock_bh(&ldev->dmb_ht_lock);
+
+	if (refcount_dec_and_test(&dmb_node->refcnt))
+		__smc_lo_unregister_dmb(ldev, dmb_node);
 	return 0;
 }

@@ -145,6 +224,12 @@ static int smc_lo_move_data(struct smcd_dev *smcd, u64 dmb_tok,
 	struct smc_lo_dev *ldev = smcd->priv;
 	struct smc_connection *conn;

+	if (!sf)
+		/* since sndbuf is merged with peer DMB, there is
+		 * no need to copy data from sndbuf to peer DMB.
+		 */
+		return 0;
+
 	read_lock_bh(&ldev->dmb_ht_lock);
 	hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_tok) {
 		if (tmp_node->token == dmb_tok) {
@@ -159,13 +244,10 @@ static int smc_lo_move_data(struct smcd_dev *smcd, u64 dmb_tok,
 	memcpy((char *)rmb_node->cpu_addr + offset, data, size);
 	read_unlock_bh(&ldev->dmb_ht_lock);

-	if (sf) {
-		conn = smcd->conn[rmb_node->sba_idx];
-		if (conn && !conn->killed)
-			tasklet_schedule(&conn->rx_tsklet);
-		else
-			return -EPIPE;
-	}
+	conn = smcd->conn[rmb_node->sba_idx];
+	if (!conn || conn->killed)
+		return -EPIPE;
+	tasklet_schedule(&conn->rx_tsklet);
 	return 0;
 }

@@ -197,6 +279,9 @@ static const struct smcd_ops lo_ops = {
 	.query_remote_gid = smc_lo_query_rgid,
 	.register_dmb = smc_lo_register_dmb,
 	.unregister_dmb = smc_lo_unregister_dmb,
+	.support_dmb_nocopy = smc_lo_support_dmb_nocopy,
+	.attach_dmb = smc_lo_attach_dmb,
+	.detach_dmb = smc_lo_detach_dmb,
 	.add_vlan_id = NULL,
 	.del_vlan_id = NULL,
 	.set_vlan_required = NULL,
@@ -275,12 +360,17 @@ static int smc_lo_dev_init(struct smc_lo_dev *ldev)
 	smc_lo_generate_ids(ldev);
 	rwlock_init(&ldev->dmb_ht_lock);
 	hash_init(ldev->dmb_ht);
+	atomic_set(&ldev->dmb_cnt, 0);
+	init_waitqueue_head(&ldev->ldev_release);
+
 	return smcd_lo_register_dev(ldev);
 }

 static void smc_lo_dev_exit(struct smc_lo_dev *ldev)
 {
 	smcd_lo_unregister_dev(ldev);
+	if (atomic_read(&ldev->dmb_cnt))
+		wait_event(ldev->ldev_release, !atomic_read(&ldev->dmb_cnt));
 }

 static void smc_lo_dev_release(struct device *dev)
diff --git a/net/smc/smc_loopback.h b/net/smc/smc_loopback.h
index b8206338309f..6dd4292dae56 100644
--- a/net/smc/smc_loopback.h
+++ b/net/smc/smc_loopback.h
@@ -30,6 +30,7 @@ struct smc_lo_dmb_node {
 	u32 sba_idx;
 	void *cpu_addr;
 	dma_addr_t dma_addr;
+	refcount_t refcnt;
 };

 struct smc_lo_dev {
@@ -37,9 +38,11 @@ struct smc_lo_dev {
 	struct device dev;
 	u16 chid;
 	struct smcd_gid local_gid;
+	atomic_t dmb_cnt;
 	rwlock_t dmb_ht_lock;
 	DECLARE_BITMAP(sba_idx_mask, SMC_LO_MAX_DMBS);
 	DECLARE_HASHTABLE(dmb_ht, SMC_LO_DMBS_HASH_BITS);
+	wait_queue_head_t ldev_release;
 };

 int smc_loopback_init(void);