From: Shao Denghui <shaodenghui@huawei.com>
euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I94XYA
CVE: NA
-------------------------------------------------
On aarch64 NUMA machines, the kworker created for iscsi constantly jumps across node boundaries. When it runs on a different node, or even a different CPU package, from the softirq of the network interface, the memcpy in iscsi_tcp_segment_recv() slows down and iSCSI performance becomes terrible.
In this patch, we track the CPU that the softirq runs on and tell queue_work_on() to execute iscsi_xmitworker on the same NUMA node.
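As a minimal sketch distilled from the diff below (the helper names record_rx_cpu()/queue_xmit_near_rx() are illustrative only, not part of the patch; intimate_cpu, xmitwork and the workqueue/socket APIs are the ones the patch actually touches), the core pattern is:

  /* Softirq side: remember which CPU handled the network RX path. */
  static void record_rx_cpu(struct iscsi_conn *conn, struct sock *sk)
  {
  	if (!sock_owned_by_user_nocheck(sk))
  		conn->intimate_cpu = smp_processor_id();
  }

  /* Xmit side: queue the work on that CPU (hence the same NUMA node),
   * falling back to a plain queue_work() when no valid CPU was recorded.
   */
  static void queue_xmit_near_rx(struct iscsi_host *ihost, struct iscsi_conn *conn)
  {
  	int cpu = conn->intimate_cpu;

  	if (cpu >= 0 && cpu_possible(cpu))
  		queue_work_on(cpu, ihost->workq, &conn->xmitwork);
  	else
  		queue_work(ihost->workq, &conn->xmitwork);
  }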
Signed-off-by: Biaoxiang Ye <yebiaoxiang@huawei.com>
Signed-off-by: shaodenghui <shaodenghui@huawei.com>
---
 drivers/scsi/iscsi_tcp.c | 13 +++++++++++++
 drivers/scsi/libiscsi.c  | 24 ++++++++++++++++++++++--
 include/scsi/libiscsi.h  |  1 +
 3 files changed, 36 insertions(+), 2 deletions(-)
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 8e14cea15f98..f7ae9de005ec 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -170,6 +170,9 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk)
 	struct iscsi_sw_tcp_conn *tcp_sw_conn;
 	struct iscsi_tcp_conn *tcp_conn;
 	struct iscsi_conn *conn;
+#ifdef KWORKER_NUMA_AFFINITY
+	int current_cpu;
+#endif
 
 	trace_sk_data_ready(sk);
 
@@ -180,6 +183,16 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk)
 		return;
 	}
 	tcp_conn = conn->dd_data;
+
+#ifdef KWORKER_NUMA_AFFINITY
+	/* save intimate cpu when in softirq */
+	if (!sock_owned_by_user_nocheck(sk)) {
+		current_cpu = smp_processor_id();
+		if (conn->intimate_cpu != current_cpu)
+			conn->intimate_cpu = current_cpu;
+	}
+#endif
+
 	tcp_sw_conn = tcp_conn->dd_data;
 
 	if (tcp_sw_conn->queue_recv)
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 0fda8905eabd..edb732d60c90 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -89,9 +89,20 @@ inline void iscsi_conn_queue_xmit(struct iscsi_conn *conn)
 {
 	struct Scsi_Host *shost = conn->session->host;
 	struct iscsi_host *ihost = shost_priv(shost);
+#ifdef KWORKER_NUMA_AFFINITY
+	int intimate_cpu = conn->intimate_cpu;
 
+	if (ihost->workq) {
+		/* we expect it to be executed on the same NUMA node as the intimate cpu */
+		if ((intimate_cpu >= 0) && cpu_possible(intimate_cpu))
+			queue_work_on(intimate_cpu, ihost->workq, &conn->xmitwork);
+		else
+			queue_work(ihost->workq, &conn->xmitwork);
+	}
+#else
 	if (ihost->workq)
 		queue_work(ihost->workq, &conn->xmitwork);
+#endif
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_queue_xmit);
 
@@ -2907,9 +2918,15 @@ struct Scsi_Host *iscsi_host_alloc(const struct scsi_host_template *sht,
 	ihost = shost_priv(shost);
 
 	if (xmit_can_sleep) {
+#ifdef KWORKER_NUMA_AFFINITY
+		/* this kind of workqueue only supports a single work */
+		ihost->workq = alloc_ordered_workqueue("iscsi_q_%d", __WQ_LEGACY | WQ_MEM_RECLAIM |
+						       __WQ_DYNAMIC, shost->host_no);
+#else
 		ihost->workq = alloc_workqueue("iscsi_q_%d",
-			WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
-			1, shost->host_no);
+				WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
+				1, shost->host_no);
+#endif
 		if (!ihost->workq)
 			goto free_host;
 	}
@@ -3190,6 +3207,9 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
 	conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
 	conn->id = conn_idx;
 	conn->exp_statsn = 0;
+#ifdef KWORKER_NUMA_AFFINITY
+	conn->intimate_cpu = -1;
+#endif
 
 	timer_setup(&conn->transport_timer, iscsi_check_transport_timeouts, 0);
 
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index d253b82e973e..ead17d2224a1 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -266,6 +266,7 @@ struct iscsi_conn {
 	/* custom statistics */
 	uint32_t eh_abort_cnt;
 	uint32_t fmr_unalign_cnt;
+	int intimate_cpu;
 
 	KABI_RESERVE(1)
 	KABI_RESERVE(2)