mailweb.openeuler.org

Compass-ci

compass-ci@openeuler.org

  • 5230 discussions
[PATCH compass-ci] sched: refactor sched class for the lkp cluster sync
by Ren Wen 09 Nov '20

Refactor the sched class according to the scheduler.cr API: extract the
request_cluster_state function from sched.cr to request_cluster_state.cr.

Signed-off-by: Ren Wen <15991987063(a)163.com>
---
 src/lib/sched.cr                       | 108 +-----------------------
 src/scheduler/request_cluster_state.cr | 111 +++++++++++++++++++++++++
 2 files changed, 112 insertions(+), 107 deletions(-)
 create mode 100644 src/scheduler/request_cluster_state.cr

diff --git a/src/lib/sched.cr b/src/lib/sched.cr
index 3709cb1..077b071 100644
--- a/src/lib/sched.cr
+++ b/src/lib/sched.cr
@@ -15,6 +15,7 @@
 require "../scheduler/elasticsearch_client"
 require "../scheduler/find_job_boot"
 require "../scheduler/find_next_job_boot"
+require "../scheduler/request_cluster_state"
 
 class Sched
   property es
@@ -49,113 +50,6 @@ class Sched
     @redis.hash_del("sched/host2queues", hostname)
   end
 
-  # return:
-  #   Hash(String, Hash(String, String))
-  def get_cluster_state(cluster_id)
-    cluster_state = @redis.hash_get("sched/cluster_state", cluster_id)
-    if cluster_state
-      cluster_state = Hash(String, Hash(String, String)).from_json(cluster_state)
-    else
-      cluster_state = Hash(String, Hash(String, String)).new
-    end
-    return cluster_state
-  end
-
-  # Update job info according to cluster id.
-  def update_cluster_state(cluster_id, job_id, job_info : Hash(String, String))
-    cluster_state = get_cluster_state(cluster_id)
-    if cluster_state[job_id]?
-      cluster_state[job_id].merge!(job_info)
-      @redis.hash_set("sched/cluster_state", cluster_id, cluster_state.to_json)
-    end
-  end
-
-  # Return response according to different request states.
-  # all request states:
-  #   wait_ready | abort | failed | finished | wait_finish |
-  #   write_state | roles_ip
-  def request_cluster_state(env)
-    request_state = env.params.query["state"]
-    job_id = env.params.query["job_id"]
-    cluster_id = @redis.hash_get("sched/id2cluster", job_id).not_nil!
-    cluster_state = ""
-
-    states = {"abort"       => "abort",
-              "finished"    => "finish",
-              "failed"      => "abort",
-              "wait_ready"  => "ready",
-              "wait_finish" => "finish"}
-
-    case request_state
-    when "abort", "finished", "failed"
-      # update node state only
-      update_cluster_state(cluster_id, job_id, {"state" => states[request_state]})
-    when "wait_ready"
-      update_cluster_state(cluster_id, job_id, {"state" => states[request_state]})
-      @block_helper.block_until_finished(cluster_id) {
-        cluster_state = sync_cluster_state(cluster_id, job_id, states[request_state])
-        cluster_state == "ready" || cluster_state == "abort"
-      }
-
-      return cluster_state
-    when "wait_finish"
-      update_cluster_state(cluster_id, job_id, {"state" => states[request_state]})
-      while 1
-        sleep(10)
-        cluster_state = sync_cluster_state(cluster_id, job_id, states[request_state])
-        break if (cluster_state == "finish" || cluster_state == "abort")
-      end
-
-      return cluster_state
-    when "write_state"
-      node_roles = env.params.query["node_roles"]
-      node_ip = env.params.query["ip"]
-      direct_ips = env.params.query["direct_ips"]
-      direct_macs = env.params.query["direct_macs"]
-
-      job_info = {"roles"       => node_roles,
-                  "ip"          => node_ip,
-                  "direct_ips"  => direct_ips,
-                  "direct_macs" => direct_macs}
-      update_cluster_state(cluster_id, job_id, job_info)
-    when "roles_ip"
-      role = "server"
-      role_state = get_role_state(cluster_id, role)
-      raise "Missing #{role} state in cluster state" unless role_state
-      return "server=#{role_state["ip"]}\n" \
-             "direct_server_ips=#{role_state["direct_ips"]}"
-    end
-
-    # show cluster state
-    return @redis.hash_get("sched/cluster_state", cluster_id)
-  end
-
-  # get the node state of role from cluster_state
-  private def get_role_state(cluster_id, role)
-    cluster_state = get_cluster_state(cluster_id)
-    cluster_state.each_value do |role_state|
-      return role_state if role_state["roles"] == role
-    end
-  end
-
-  # node_state: "finish" | "ready"
-  def sync_cluster_state(cluster_id, job_id, node_state)
-    cluster_state = get_cluster_state(cluster_id)
-    cluster_state.each_value do |host_state|
-      state = host_state["state"]
-      return "abort" if state == "abort"
-    end
-
-    cluster_state.each_value do |host_state|
-      state = host_state["state"]
-      next if "#{state}" == "#{node_state}"
-      return "retry"
-    end
-
-    # cluster state is node state when all nodes are normal
-    return node_state
-  end
-
   # get cluster config using own lkp_src cluster file,
   # a hash type will be returned
   def get_cluster_config(cluster_file, lkp_initrd_user, os_arch)
diff --git a/src/scheduler/request_cluster_state.cr b/src/scheduler/request_cluster_state.cr
new file mode 100644
index 0000000..07ba6fd
--- /dev/null
+++ b/src/scheduler/request_cluster_state.cr
@@ -0,0 +1,111 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+class Sched
+  # return:
+  #   Hash(String, Hash(String, String))
+  def get_cluster_state(cluster_id)
+    cluster_state = @redis.hash_get("sched/cluster_state", cluster_id)
+    if cluster_state
+      cluster_state = Hash(String, Hash(String, String)).from_json(cluster_state)
+    else
+      cluster_state = Hash(String, Hash(String, String)).new
+    end
+    return cluster_state
+  end
+
+  # Update job info according to cluster id.
+  def update_cluster_state(cluster_id, job_id, job_info : Hash(String, String))
+    cluster_state = get_cluster_state(cluster_id)
+    if cluster_state[job_id]?
+      cluster_state[job_id].merge!(job_info)
+      @redis.hash_set("sched/cluster_state", cluster_id, cluster_state.to_json)
+    end
+  end
+
+  # Return response according to different request states.
+  # all request states:
+  #   wait_ready | abort | failed | finished | wait_finish |
+  #   write_state | roles_ip
+  def request_cluster_state(env)
+    request_state = env.params.query["state"]
+    job_id = env.params.query["job_id"]
+    cluster_id = @redis.hash_get("sched/id2cluster", job_id).not_nil!
+    cluster_state = ""
+
+    states = {"abort"       => "abort",
+              "finished"    => "finish",
+              "failed"      => "abort",
+              "wait_ready"  => "ready",
+              "wait_finish" => "finish"}
+
+    case request_state
+    when "abort", "finished", "failed"
+      # update node state only
+      update_cluster_state(cluster_id, job_id, {"state" => states[request_state]})
+    when "wait_ready"
+      update_cluster_state(cluster_id, job_id, {"state" => states[request_state]})
+      @block_helper.block_until_finished(cluster_id) {
+        cluster_state = sync_cluster_state(cluster_id, job_id, states[request_state])
+        cluster_state == "ready" || cluster_state == "abort"
+      }
+
+      return cluster_state
+    when "wait_finish"
+      update_cluster_state(cluster_id, job_id, {"state" => states[request_state]})
+      while 1
+        sleep(10)
+        cluster_state = sync_cluster_state(cluster_id, job_id, states[request_state])
+        break if (cluster_state == "finish" || cluster_state == "abort")
+      end
+
+      return cluster_state
+    when "write_state"
+      node_roles = env.params.query["node_roles"]
+      node_ip = env.params.query["ip"]
+      direct_ips = env.params.query["direct_ips"]
+      direct_macs = env.params.query["direct_macs"]
+
+      job_info = {"roles"       => node_roles,
+                  "ip"          => node_ip,
+                  "direct_ips"  => direct_ips,
+                  "direct_macs" => direct_macs}
+      update_cluster_state(cluster_id, job_id, job_info)
+    when "roles_ip"
+      role = "server"
+      role_state = get_role_state(cluster_id, role)
+      raise "Missing #{role} state in cluster state" unless role_state
+      return "server=#{role_state["ip"]}\n" \
+             "direct_server_ips=#{role_state["direct_ips"]}"
+    end
+
+    # show cluster state
+    return @redis.hash_get("sched/cluster_state", cluster_id)
+  end
+
+  # get the node state of role from cluster_state
+  private def get_role_state(cluster_id, role)
+    cluster_state = get_cluster_state(cluster_id)
+    cluster_state.each_value do |role_state|
+      return role_state if role_state["roles"] == role
+    end
+  end
+
+  # node_state: "finish" | "ready"
+  def sync_cluster_state(cluster_id, job_id, node_state)
+    cluster_state = get_cluster_state(cluster_id)
+    cluster_state.each_value do |host_state|
+      state = host_state["state"]
+      return "abort" if state == "abort"
+    end
+
+    cluster_state.each_value do |host_state|
+      state = host_state["state"]
+      next if "#{state}" == "#{node_state}"
+      return "retry"
+    end
+
+    # cluster state is node state when all nodes are normal
+    return node_state
+  end
+end
--
2.23.0
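
The heart of the sync protocol is the reduction in sync_cluster_state: any
"abort" aborts the whole cluster, any lagging node means "retry", and only
full agreement yields the requested state. A standalone Ruby sketch of that
reduction (the hash shape mirrors what the patch stores under
"sched/cluster_state"; the job ids here are illustrative):

# Reduce the per-node states of one cluster to a single cluster state.
def reduce_cluster_state(cluster_state, node_state)
  states = cluster_state.values.map { |host| host['state'] }

  # Any aborted node aborts the whole cluster.
  return 'abort' if states.include?('abort')

  # Some node has not reached node_state yet: the caller polls again.
  return 'retry' unless states.all? { |s| s == node_state }

  # All nodes agree, so the cluster state equals the node state.
  node_state
end

# Example: a two-node cluster waiting to become ready.
cluster = {
  'crystal.1' => { 'state' => 'ready' },
  'crystal.2' => { 'state' => 'requesting' }
}
puts reduce_cluster_state(cluster, 'ready')   # retry
cluster['crystal.2']['state'] = 'ready'
puts reduce_cluster_state(cluster, 'ready')   # ready
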
[PATCH v2 compass-ci] sched: refactor sched class for the lkp cluster sync
by Ren Wen 09 Nov '20

Refactor the sched class according to the scheduler.cr API: extract the
request_cluster_state function from sched.cr to request_cluster_state.cr.

Signed-off-by: Ren Wen <15991987063(a)163.com>
---
 src/lib/sched.cr                       | 108 +-----------------------
 src/scheduler/request_cluster_state.cr | 111 +++++++++++++++++++++++++
 2 files changed, 112 insertions(+), 107 deletions(-)
 create mode 100644 src/scheduler/request_cluster_state.cr

diff --git a/src/lib/sched.cr b/src/lib/sched.cr
index 6aba6cd..1b6eb8a 100644
--- a/src/lib/sched.cr
+++ b/src/lib/sched.cr
@@ -16,6 +16,7 @@ require "../scheduler/elasticsearch_client"
 require "../scheduler/find_job_boot"
 require "../scheduler/find_next_job_boot"
 require "../scheduler/close_job"
+require "../scheduler/request_cluster_state"
 
 class Sched
   property es
@@ -50,113 +51,6 @@ class Sched
     @redis.hash_del("sched/host2queues", hostname)
   end
 
-  # return:
-  #   Hash(String, Hash(String, String))
-  def get_cluster_state(cluster_id)
-    cluster_state = @redis.hash_get("sched/cluster_state", cluster_id)
-    if cluster_state
-      cluster_state = Hash(String, Hash(String, String)).from_json(cluster_state)
-    else
-      cluster_state = Hash(String, Hash(String, String)).new
-    end
-    return cluster_state
-  end
-
-  # Update job info according to cluster id.
-  def update_cluster_state(cluster_id, job_id, job_info : Hash(String, String))
-    cluster_state = get_cluster_state(cluster_id)
-    if cluster_state[job_id]?
-      cluster_state[job_id].merge!(job_info)
-      @redis.hash_set("sched/cluster_state", cluster_id, cluster_state.to_json)
-    end
-  end
-
-  # Return response according to different request states.
-  # all request states:
-  #   wait_ready | abort | failed | finished | wait_finish |
-  #   write_state | roles_ip
-  def request_cluster_state(env)
-    request_state = env.params.query["state"]
-    job_id = env.params.query["job_id"]
-    cluster_id = @redis.hash_get("sched/id2cluster", job_id).not_nil!
-    cluster_state = ""
-
-    states = {"abort"       => "abort",
-              "finished"    => "finish",
-              "failed"      => "abort",
-              "wait_ready"  => "ready",
-              "wait_finish" => "finish"}
-
-    case request_state
-    when "abort", "finished", "failed"
-      # update node state only
-      update_cluster_state(cluster_id, job_id, {"state" => states[request_state]})
-    when "wait_ready"
-      update_cluster_state(cluster_id, job_id, {"state" => states[request_state]})
-      @block_helper.block_until_finished(cluster_id) {
-        cluster_state = sync_cluster_state(cluster_id, job_id, states[request_state])
-        cluster_state == "ready" || cluster_state == "abort"
-      }
-
-      return cluster_state
-    when "wait_finish"
-      update_cluster_state(cluster_id, job_id, {"state" => states[request_state]})
-      while 1
-        sleep(10)
-        cluster_state = sync_cluster_state(cluster_id, job_id, states[request_state])
-        break if (cluster_state == "finish" || cluster_state == "abort")
-      end
-
-      return cluster_state
-    when "write_state"
-      node_roles = env.params.query["node_roles"]
-      node_ip = env.params.query["ip"]
-      direct_ips = env.params.query["direct_ips"]
-      direct_macs = env.params.query["direct_macs"]
-
-      job_info = {"roles"       => node_roles,
-                  "ip"          => node_ip,
-                  "direct_ips"  => direct_ips,
-                  "direct_macs" => direct_macs}
-      update_cluster_state(cluster_id, job_id, job_info)
-    when "roles_ip"
-      role = "server"
-      role_state = get_role_state(cluster_id, role)
-      raise "Missing #{role} state in cluster state" unless role_state
-      return "server=#{role_state["ip"]}\n" \
-             "direct_server_ips=#{role_state["direct_ips"]}"
-    end
-
-    # show cluster state
-    return @redis.hash_get("sched/cluster_state", cluster_id)
-  end
-
-  # get the node state of role from cluster_state
-  private def get_role_state(cluster_id, role)
-    cluster_state = get_cluster_state(cluster_id)
-    cluster_state.each_value do |role_state|
-      return role_state if role_state["roles"] == role
-    end
-  end
-
-  # node_state: "finish" | "ready"
-  def sync_cluster_state(cluster_id, job_id, node_state)
-    cluster_state = get_cluster_state(cluster_id)
-    cluster_state.each_value do |host_state|
-      state = host_state["state"]
-      return "abort" if state == "abort"
-    end
-
-    cluster_state.each_value do |host_state|
-      state = host_state["state"]
-      next if "#{state}" == "#{node_state}"
-      return "retry"
-    end
-
-    # cluster state is node state when all nodes are normal
-    return node_state
-  end
-
   # get cluster config using own lkp_src cluster file,
   # a hash type will be returned
   def get_cluster_config(cluster_file, lkp_initrd_user, os_arch)
diff --git a/src/scheduler/request_cluster_state.cr b/src/scheduler/request_cluster_state.cr
new file mode 100644
index 0000000..ac6cb8e
--- /dev/null
+++ b/src/scheduler/request_cluster_state.cr
@@ -0,0 +1,111 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+class Sched
+  # Return response according to different request states.
+  # all request states:
+  #   wait_ready | abort | failed | finished | wait_finish |
+  #   write_state | roles_ip
+  def request_cluster_state(env)
+    request_state = env.params.query["state"]
+    job_id = env.params.query["job_id"]
+    cluster_id = @redis.hash_get("sched/id2cluster", job_id).not_nil!
+    cluster_state = ""
+
+    states = {"abort"       => "abort",
+              "finished"    => "finish",
+              "failed"      => "abort",
+              "wait_ready"  => "ready",
+              "wait_finish" => "finish"}
+
+    case request_state
+    when "abort", "finished", "failed"
+      # update node state only
+      update_cluster_state(cluster_id, job_id, {"state" => states[request_state]})
+    when "wait_ready"
+      update_cluster_state(cluster_id, job_id, {"state" => states[request_state]})
+      @block_helper.block_until_finished(cluster_id) {
+        cluster_state = sync_cluster_state(cluster_id, job_id, states[request_state])
+        cluster_state == "ready" || cluster_state == "abort"
+      }
+
+      return cluster_state
+    when "wait_finish"
+      update_cluster_state(cluster_id, job_id, {"state" => states[request_state]})
+      while 1
+        sleep(10)
+        cluster_state = sync_cluster_state(cluster_id, job_id, states[request_state])
+        break if (cluster_state == "finish" || cluster_state == "abort")
+      end
+
+      return cluster_state
+    when "write_state"
+      node_roles = env.params.query["node_roles"]
+      node_ip = env.params.query["ip"]
+      direct_ips = env.params.query["direct_ips"]
+      direct_macs = env.params.query["direct_macs"]
+
+      job_info = {"roles"       => node_roles,
+                  "ip"          => node_ip,
+                  "direct_ips"  => direct_ips,
+                  "direct_macs" => direct_macs}
+      update_cluster_state(cluster_id, job_id, job_info)
+    when "roles_ip"
+      role = "server"
+      role_state = get_role_state(cluster_id, role)
+      raise "Missing #{role} state in cluster state" unless role_state
+      return "server=#{role_state["ip"]}\n" \
+             "direct_server_ips=#{role_state["direct_ips"]}"
+    end
+
+    # show cluster state
+    return @redis.hash_get("sched/cluster_state", cluster_id)
+  end
+
+  # node_state: "finish" | "ready"
+  def sync_cluster_state(cluster_id, job_id, node_state)
+    cluster_state = get_cluster_state(cluster_id)
+    cluster_state.each_value do |host_state|
+      state = host_state["state"]
+      return "abort" if state == "abort"
+    end
+
+    cluster_state.each_value do |host_state|
+      state = host_state["state"]
+      next if "#{state}" == "#{node_state}"
+      return "retry"
+    end
+
+    # cluster state is node state when all nodes are normal
+    return node_state
+  end
+
+  # return:
+  #   Hash(String, Hash(String, String))
+  def get_cluster_state(cluster_id)
+    cluster_state = @redis.hash_get("sched/cluster_state", cluster_id)
+    if cluster_state
+      cluster_state = Hash(String, Hash(String, String)).from_json(cluster_state)
+    else
+      cluster_state = Hash(String, Hash(String, String)).new
+    end
+    return cluster_state
+  end
+
+  # Update job info according to cluster id.
+  def update_cluster_state(cluster_id, job_id, job_info : Hash(String, String))
+    cluster_state = get_cluster_state(cluster_id)
+    if cluster_state[job_id]?
+      cluster_state[job_id].merge!(job_info)
+      @redis.hash_set("sched/cluster_state", cluster_id, cluster_state.to_json)
+    end
+  end
+
+  # get the node state of role from cluster_state
+  private def get_role_state(cluster_id, role)
+    cluster_state = get_cluster_state(cluster_id)
+    cluster_state.each_value do |role_state|
+      return role_state if role_state["roles"] == role
+    end
+  end
+end
--
2.23.0
[PATCH v2 compass-ci] monitoring/filter.cr: query key support regular match
by Wu Zhende 09 Nov '20

[Why]
Query keys can now be matched as regular expressions.

[Example]
1: monitor job.*=
   query=>{"job.*":null}
   Any log whose key includes "job" will be matched.
2: monitor .*=
   This matches all logs.

Signed-off-by: Wu Zhende <wuzhende666(a)163.com>
---
 src/monitoring/filter.cr | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/src/monitoring/filter.cr b/src/monitoring/filter.cr
index 88702b6..8e672e0 100644
--- a/src/monitoring/filter.cr
+++ b/src/monitoring/filter.cr
@@ -56,7 +56,8 @@ class Filter
 
   def match_query(query : Hash(String, JSON::Any), msg : Hash(String, JSON::Any))
     query.each do |key, value|
-      return false unless msg.has_key?(key)
+      key = find_real_key(key, msg.keys) unless msg.has_key?(key)
+      return false unless key
 
       values = value.as_a
       next if values.includes?(nil) || values.includes?(msg[key]?)
@@ -66,6 +67,12 @@ class Filter
     return true
   end
 
+  private def find_real_key(rule, keys)
+    keys.each do |key|
+      return key if key.to_s =~ /#{rule}/
+    end
+  end
+
   private def regular_match(rules, string)
     rules.each do |rule|
       return true if string =~ /#{rule}/
--
2.23.0
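
find_real_key makes a query key act as a regular expression over the
message's keys when it is not a literal key. A standalone Ruby sketch of the
lookup (note that the first matching key wins; the sample keys are
illustrative):

# Given a query key that is not a literal key of the message, return the
# first message key it matches as a regex, else nil.
def find_real_key(rule, keys)
  keys.find { |key| key.to_s =~ /#{rule}/ }
end

msg_keys = %w[job_id job_state testbox]
puts find_real_key('job.*', msg_keys)          # job_id (first match wins)
puts find_real_key('.*', msg_keys)             # job_id (matches everything)
puts find_real_key('suite', msg_keys).inspect  # nil, no key matches
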
[PATCH v8 lkp-tests 1/2] jobs/iozone-bs.yaml: combine multiple test parameter to single
by Lu Kaiyi 09 Nov '20

[Why]
Avoid an explosion of parameters for iozone-bs.yaml.

[How]
Combine the multiple test parameters into a single one.

Signed-off-by: Lu Kaiyi <2392863668(a)qq.com>
---
 jobs/iozone-bs.yaml | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/jobs/iozone-bs.yaml b/jobs/iozone-bs.yaml
index e2cd9f48..53f1ac46 100644
--- a/jobs/iozone-bs.yaml
+++ b/jobs/iozone-bs.yaml
@@ -2,9 +2,7 @@ suite: iozone
 category: benchmark
 
 file_size: 4g
-write_rewrite: true
-read_reread: true
-random_read_write: true
+test: write, read, rand_rw
 
 block_size:
 - 64k
--
2.23.0
[PATCH lkp-tests] lib/monitor: query key support regular match
by Wu Zhende 09 Nov '20

[Why]
Enable more flexible monitoring conditions.

When I use "monitor job.*=", I get query=>{"job":{"*":null}}.
It's not what I want. I want query=>{"job.*": null}.

Signed-off-by: Wu Zhende <wuzhende666(a)163.com>
---
 lib/monitor.rb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/monitor.rb b/lib/monitor.rb
index 56283e10..67c2389a 100755
--- a/lib/monitor.rb
+++ b/lib/monitor.rb
@@ -51,7 +51,7 @@ class Monitor
   def merge_overrides
     return if @overrides.empty?
 
-    revise_hash(@query, @overrides, true)
+    @query.merge!(@overrides)
  end
 
   def field_check
--
2.23.0
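
The fix matters because of how overrides are folded into the query. A
standalone Ruby sketch contrasting the old nested expansion with the new flat
merge (nested_revise is a simplified stand-in for revise_hash, written here
from the behaviour the commit message describes):

require 'json'

# Stand-in for revise_hash: expand dotted keys into nested hashes,
# so "job.*" becomes {"job" => {"*" => nil}}, breaking regex-style keys.
def nested_revise(query, overrides)
  overrides.each do |key, value|
    node = query
    *path, leaf = key.split('.')
    path.each { |k| node = (node[k] ||= {}) }
    node[leaf] = value
  end
  query
end

overrides = { 'job.*' => nil }
puts nested_revise({}, overrides).to_json   # {"job":{"*":null}}  old behaviour
query = {}
puts query.merge!(overrides).to_json        # {"job.*":null}      new behaviour
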
[PATCH v1 compass-ci] sched: refactor sched class for the close job function
by Cao Xueliang 09 Nov '20

Refactor the sched class according to the scheduler.cr API: extract the
close_job function from sched.cr to close_job.cr.

Signed-off-by: Cao Xueliang <caoxl78320(a)163.com>
---
 src/lib/sched.cr           | 28 +---------------------------
 src/scheduler/close_job.cr | 31 +++++++++++++++++++++++++++++++
 2 files changed, 32 insertions(+), 27 deletions(-)
 create mode 100644 src/scheduler/close_job.cr

diff --git a/src/lib/sched.cr b/src/lib/sched.cr
index 3709cb1..6aba6cd 100644
--- a/src/lib/sched.cr
+++ b/src/lib/sched.cr
@@ -15,6 +15,7 @@
 require "../scheduler/elasticsearch_client"
 require "../scheduler/find_job_boot"
 require "../scheduler/find_next_job_boot"
+require "../scheduler/close_job"
 
 class Sched
   property es
@@ -404,33 +405,6 @@ class Sched
     @redis.hash_set("sched/tbox2ssh_port", testbox, ssh_port)
   end
 
-  def delete_access_key_file(job : Job)
-    File.delete(job.access_key_file) if File.exists?(job.access_key_file)
-  end
-
-  def close_job(job_id : String)
-    job = @redis.get_job(job_id)
-
-    delete_access_key_file(job) if job
-
-    response = @es.set_job_content(job)
-    if response["_id"] == nil
-      # es update fail, raise exception
-      raise "es set job content fail!"
-    end
-
-    response = @task_queue.hand_over_task(
-      "sched/#{job.queue}", "extract_stats", job_id
-    )
-    if response[0] != 201
-      raise "#{response}"
-    end
-
-    @redis.remove_finished_job(job_id)
-
-    return %({"job_id": "#{job_id}", "job_state": "complete"})
-  end
-
   private def query_consumable_keys(shortest_queue_name)
     keys = [] of String
     search = "sched/" + shortest_queue_name + "*"
diff --git a/src/scheduler/close_job.cr b/src/scheduler/close_job.cr
new file mode 100644
index 0000000..d071d69
--- /dev/null
+++ b/src/scheduler/close_job.cr
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+class Sched
+  def close_job(job_id : String)
+    job = @redis.get_job(job_id)
+
+    delete_access_key_file(job) if job
+
+    response = @es.set_job_content(job)
+    if response["_id"] == nil
+      # es update fail, raise exception
+      raise "es set job content fail!"
+    end
+
+    response = @task_queue.hand_over_task(
+      "sched/#{job.queue}", "extract_stats", job_id
+    )
+    if response[0] != 201
+      raise "#{response}"
+    end
+
+    @redis.remove_finished_job(job_id)
+
+    return %({"job_id": "#{job_id}", "job_state": "complete"})
+  end
+
+  def delete_access_key_file(job : Job)
+    File.delete(job.access_key_file) if File.exists?(job.access_key_file)
+  end
+end
--
2.23.0
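
close_job performs its effects in a fixed order: persist the final job
content to Elasticsearch, hand the job over to the extract_stats queue, then
drop it from Redis, failing fast if either of the first two steps is not
acknowledged. A standalone Ruby sketch of that sequence (the client classes
and sample job id are invented for illustration):

# Hypothetical stand-ins for the ES, task-queue and Redis clients; the
# contracts mirror the Crystal code above.
class FakeES
  def set_job_content(job)
    { '_id' => job['id'] }
  end
end

class FakeTaskQueue
  def hand_over_task(queue, event, job_id)
    [201, { 'id' => job_id }]
  end
end

class FakeRedis
  def remove_finished_job(job_id)
    true
  end
end

def close_job(job_id, job, es, task_queue, redis)
  # 1. persist the final job content; a missing _id means the ES update failed
  raise 'es set job content fail!' if es.set_job_content(job)['_id'].nil?

  # 2. hand the job over for stats extraction; 201 acknowledges the task
  status, = task_queue.hand_over_task("sched/#{job['queue']}", 'extract_stats', job_id)
  raise "hand over task failed: #{status}" unless status == 201

  # 3. only after both side effects succeed, drop the job from Redis
  redis.remove_finished_job(job_id)
  %({"job_id": "#{job_id}", "job_state": "complete"})
end

puts close_job('z9.173331', { 'id' => 'z9.173331', 'queue' => 'vm-2p8g' },
               FakeES.new, FakeTaskQueue.new, FakeRedis.new)
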
[PATCH compass-ci] sched: refactor sched class for the close job function
by Cao Xueliang 09 Nov '20

Refactor the sched class according to the scheduler.cr API: extract the
close_job function from sched.cr to close_job.cr.

Signed-off-by: Cao Xueliang <caoxl78320(a)163.com>
---
 src/lib/sched.cr           | 28 +---------------------------
 src/scheduler/close_job.cr | 31 +++++++++++++++++++++++++++++++
 2 files changed, 32 insertions(+), 27 deletions(-)
 create mode 100644 src/scheduler/close_job.cr

diff --git a/src/lib/sched.cr b/src/lib/sched.cr
index 3709cb1..6aba6cd 100644
--- a/src/lib/sched.cr
+++ b/src/lib/sched.cr
@@ -15,6 +15,7 @@
 require "../scheduler/elasticsearch_client"
 require "../scheduler/find_job_boot"
 require "../scheduler/find_next_job_boot"
+require "../scheduler/close_job"
 
 class Sched
   property es
@@ -404,33 +405,6 @@ class Sched
     @redis.hash_set("sched/tbox2ssh_port", testbox, ssh_port)
   end
 
-  def delete_access_key_file(job : Job)
-    File.delete(job.access_key_file) if File.exists?(job.access_key_file)
-  end
-
-  def close_job(job_id : String)
-    job = @redis.get_job(job_id)
-
-    delete_access_key_file(job) if job
-
-    response = @es.set_job_content(job)
-    if response["_id"] == nil
-      # es update fail, raise exception
-      raise "es set job content fail!"
-    end
-
-    response = @task_queue.hand_over_task(
-      "sched/#{job.queue}", "extract_stats", job_id
-    )
-    if response[0] != 201
-      raise "#{response}"
-    end
-
-    @redis.remove_finished_job(job_id)
-
-    return %({"job_id": "#{job_id}", "job_state": "complete"})
-  end
-
   private def query_consumable_keys(shortest_queue_name)
     keys = [] of String
     search = "sched/" + shortest_queue_name + "*"
diff --git a/src/scheduler/close_job.cr b/src/scheduler/close_job.cr
new file mode 100644
index 0000000..d071d69
--- /dev/null
+++ b/src/scheduler/close_job.cr
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: MulanPSL-2.0+
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+
+class Sched
+  def close_job(job_id : String)
+    job = @redis.get_job(job_id)
+
+    delete_access_key_file(job) if job
+
+    response = @es.set_job_content(job)
+    if response["_id"] == nil
+      # es update fail, raise exception
+      raise "es set job content fail!"
+    end
+
+    response = @task_queue.hand_over_task(
+      "sched/#{job.queue}", "extract_stats", job_id
+    )
+    if response[0] != 201
+      raise "#{response}"
+    end
+
+    @redis.remove_finished_job(job_id)
+
+    return %({"job_id": "#{job_id}", "job_state": "complete"})
+  end
+
+  def delete_access_key_file(job : Job)
+    File.delete(job.access_key_file) if File.exists?(job.access_key_file)
+  end
+end
--
2.23.0
[PATCH compass-ci] container: fix failed to build kibana images
by Liu Yinsi 09 Nov '20

[why]
When building the kibana image on an x86 machine, the build fails:

[root@localhost kibana]# ./build
Sending build context to Docker daemon 5.12kB
Step 1/3 : FROM gagara/kibana-oss-arm64:7.6.2
7.6.2: Pulling from gagara/kibana-oss-arm64
38163f410fa0: Pull complete
69a4d016f221: Pull complete
95e6c6e7c9ca: Pull complete
d13f429dd982: Pull complete
508bb3330fb2: Pull complete
9634e726f1b6: Pull complete
9c26c37850c8: Pull complete
0d0ad8467060: Pull complete
940f92726f8b: Pull complete
Digest: sha256:541632b7e9780a007f8a8be82ac8853ddcebcb04a596c00500b73f77eacfbd16
Status: Downloaded newer image for gagara/kibana-oss-arm64:7.6.2
 ---> f482a0472f78
Step 2/3 : MAINTAINER Wu Zhende <wuzhende666(a)163.com>
 ---> Running in cfa86d8ce976
Removing intermediate container cfa86d8ce976
 ---> 3be6c5f24d4b
Step 3/3 : RUN sed -i 's/server.host: "0"/server.host: "0.0.0.0"/' config/kibana.yml
 ---> Running in ff455f66df8b
standard_init_linux.go:220: exec user process caused "exec format error"
libcontainer: container start initialization failed: standard_init_linux.go:220: exec user process caused "exec format error"
The command '/bin/sh -c sed -i 's/server.host: "0"/server.host: "0.0.0.0"/' config/kibana.yml' returned a non-zero code: 1

The arm base image cannot be built on an x86 machine.

[how]
1. Use an image dict to store the arm and x86 base images.
2. Use $(arch) to choose the base image according to the system architecture.

Signed-off-by: Liu Yinsi <liuyinsi(a)163.com>
---
 container/kibana/Dockerfile | 4 +++-
 container/kibana/build      | 8 +++++++-
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/container/kibana/Dockerfile b/container/kibana/Dockerfile
index 35802fe..6e0dba0 100644
--- a/container/kibana/Dockerfile
+++ b/container/kibana/Dockerfile
@@ -1,7 +1,9 @@
 # SPDX-License-Identifier: MulanPSL-2.0+
 # Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
 
-FROM gagara/kibana-oss-arm64:7.6.2
+ARG BASE_IMAGE
+
+FROM $BASE_IMAGE
 
 # docker image borrowed from hub.docker.com/r/gagara/kibana-oss-arm64
 
diff --git a/container/kibana/build b/container/kibana/build
index a7e4717..60fdea2 100755
--- a/container/kibana/build
+++ b/container/kibana/build
@@ -3,4 +3,10 @@
 # Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
 # frozen_string_literal: true
 
-system 'docker build -t kibana:7.6.2 .'
+BASE_IMAGE_DICT = {'aarch64'=>'gagara/kibana-oss-arm64:7.6.2',
+                   'x86_64'=>'kibana:7.6.2'
+}
+
+BASE_IMAGE = BASE_IMAGE_DICT[%x(arch).chomp]
+
+system "docker build -t kibana:7.6.2 --build-arg BASE_IMAGE=#{BASE_IMAGE} ."
--
2.23.0
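
One caveat in the new build script: BASE_IMAGE_DICT yields nil on any other
architecture, which only surfaces later as an opaque docker error. A
standalone Ruby sketch of the same lookup with an explicit guard (the guard
is a hypothetical hardening, not part of the posted patch):

# Same arch -> base-image selection as the build script above, plus an
# explicit abort for unsupported architectures (hypothetical addition).
BASE_IMAGE_DICT = {
  'aarch64' => 'gagara/kibana-oss-arm64:7.6.2',
  'x86_64'  => 'kibana:7.6.2'
}.freeze

arch = %x(arch).chomp
base_image = BASE_IMAGE_DICT[arch] or abort "unsupported arch: #{arch}"

system "docker build -t kibana:7.6.2 --build-arg BASE_IMAGE=#{base_image} ."
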
[PATCH v3 compass-ci] container: fix failed to build archlinux images in x86_64 machine
by Liu Yinsi 09 Nov '20

[why]
When building the archlinux image on an x86_64 machine:

error: failed retrieving file 'core.db' from mirrors.tuna.tsinghua.edu.cn : The requested URL returned error: 404
error: failed retrieving file 'core.db' from mirrors.163.com : The requested URL returned error: 404
error: failed retrieving file 'core.db' from mirror.archlinuxarm.org : The requested URL returned error: 404
error: failed to update core (failed to retrieve some files)
error: failed retrieving file 'extra.db' from mirrors.tuna.tsinghua.edu.cn : The requested URL returned error: 404
error: failed retrieving file 'extra.db' from mirrors.163.com : The requested URL returned error: 404
error: failed retrieving file 'extra.db' from mirror.archlinuxarm.org : Resolving timed out after 10000 milliseconds
error: failed to update extra (download library error)
error: failed retrieving file 'community.db' from mirrors.tuna.tsinghua.edu.cn : The requested URL returned error: 404
error: failed retrieving file 'community.db' from mirrors.163.com : The requested URL returned error: 404
error: failed retrieving file 'community.db' from mirror.archlinuxarm.org : The requested URL returned error: 404
error: failed to update community (failed to retrieve some files)
error: failed to synchronize all databases
The command '/bin/sh -c pacman --needed --noprogressbar --noconfirm -Syu && pacman --needed --noprogressbar --noconfirm -S bash zsh git openssh rsync make gcc tzdata sudo coreutils util-linux vim gawk' returned a non-zero code: 1

The archlinux (arm) mirrors do not serve x86_64 packages.

[how]
root/etc/pacman.d/mirrorlist =>
    root/aarch64/etc/pacman.d/mirrorlist
    root/x86_64/etc/pacman.d/mirrorlist

Use ARCH=$(arch) to choose the mirrorlist according to the system architecture.

Signed-off-by: Liu Yinsi <liuyinsi(a)163.com>
---
 container/archlinux/Dockerfile                            | 6 +++++-
 container/archlinux/build                                 | 2 +-
 .../archlinux/root/{ => aarch64}/etc/pacman.d/mirrorlist  | 0
 container/archlinux/root/x86_64/etc/pacman.d/mirrorlist   | 1 +
 4 files changed, 7 insertions(+), 2 deletions(-)
 rename container/archlinux/root/{ => aarch64}/etc/pacman.d/mirrorlist (100%)
 create mode 100644 container/archlinux/root/x86_64/etc/pacman.d/mirrorlist

diff --git a/container/archlinux/Dockerfile b/container/archlinux/Dockerfile
index c0f05d3..1f80ae0 100644
--- a/container/archlinux/Dockerfile
+++ b/container/archlinux/Dockerfile
@@ -5,7 +5,11 @@ FROM lopsided/archlinux
 
 MAINTAINER Wu Fenguang <wfg(a)mail.ustc.edu.cn>
 
-COPY root /
+ARG ARCH
+
+COPY root/$ARCH /
+
 RUN chmod 755 /etc
+
 RUN pacman --needed --noprogressbar --noconfirm -Syu && \
     pacman --needed --noprogressbar --noconfirm -S bash zsh git openssh rsync make gcc tzdata sudo coreutils util-linux vim gawk
diff --git a/container/archlinux/build b/container/archlinux/build
index 81feda2..9749489 100755
--- a/container/archlinux/build
+++ b/container/archlinux/build
@@ -2,4 +2,4 @@
 # SPDX-License-Identifier: MulanPSL-2.0+
 # Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
 
-docker build -t archlinux:testbed .
+docker build --build-arg ARCH=$(arch) -t archlinux:testbed .
diff --git a/container/archlinux/root/etc/pacman.d/mirrorlist b/container/archlinux/root/aarch64/etc/pacman.d/mirrorlist
similarity index 100%
rename from container/archlinux/root/etc/pacman.d/mirrorlist
rename to container/archlinux/root/aarch64/etc/pacman.d/mirrorlist
diff --git a/container/archlinux/root/x86_64/etc/pacman.d/mirrorlist b/container/archlinux/root/x86_64/etc/pacman.d/mirrorlist
new file mode 100644
index 0000000..556fac8
--- /dev/null
+++ b/container/archlinux/root/x86_64/etc/pacman.d/mirrorlist
@@ -0,0 +1 @@
+Server = http://mirrors.tuna.tsinghua.edu.cn/archlinux/$repo/os/$arch
--
2.23.0
[PATCH v5 compass-ci] compare values by each metrics
by Lu Weitao 09 Nov '20

Compare values of each metric based on the groups matrices, and format the
compare result as an echart data_set.

background:
To support the compare-with-user-defined-template feature, the work-flow is:
  load compare_template.yaml --> query_results(ES) ---> auto group jobs_list
  ---> create groups_matrices ---> compare_values by each metrics
  ---> format/show results

current patch does:
  compare_values by each metrics ---> format/show results

Signed-off-by: Lu Weitao <luweitaobe(a)163.com>
---
 lib/compare_data_format.rb |  18 +++++++
 lib/compare_matrixes.rb    | 103 +++++++++++++++++++++++++++++++++++++
 2 files changed, 121 insertions(+)
 create mode 100644 lib/compare_data_format.rb

diff --git a/lib/compare_data_format.rb b/lib/compare_data_format.rb
new file mode 100644
index 0000000..3d82550
--- /dev/null
+++ b/lib/compare_data_format.rb
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: MulanPSL-2.0+ or GPL-2.0
+# Copyright (c) 2020 Huawei Technologies Co., Ltd. All rights reserved.
+# frozen_string_literal: true
+
+# ----------------------------------------------------------------------------------------------------
+# format compare results for a specific format
+#
+
+def format_for_echart(metrics_compare_results, template_params)
+  echart_result = {}
+  echart_result['title'] = template_params['title']
+  echart_result['unit'] = template_params['unit']
+  x_params = template_params['x_params']
+  echart_result['x_name'] = x_params.join('|') if x_params
+  echart_result['tables'] = metrics_compare_results
+
+  echart_result
+end
diff --git a/lib/compare_matrixes.rb b/lib/compare_matrixes.rb
index 078028a..119d42d 100644
--- a/lib/compare_matrixes.rb
+++ b/lib/compare_matrixes.rb
@@ -6,6 +6,7 @@ LKP_SRC ||= ENV['LKP_SRC'] || File.dirname(__dir__)
 require 'set'
 require 'json/ext'
 require_relative 'themes'
+require_relative './compare_data_format.rb'
 require "#{LKP_SRC}/lib/stats"
 
 FAILURE_PATTERNS = IO.read("#{LKP_SRC}/etc/failure").split("\n")
@@ -399,6 +400,108 @@ def compare_group_matrices(group_matrices, options)
   result_str
 end
 
+# input: groups_matrices
+# {
+#   group_key_1 => {
+#     dimension_1 => matrix_1, (openeuler 20.03)
+#     dimension_2 => matrix_2, (openeuler 20.09)
+#     dimension_3 => matrix_3, (centos 7.6)
+#   },
+#   group_key_2 => {...}
+# }
+#
+# output: compare_metrics_values
+# {
+#   group_key_1 => {
+#     metric_1 => {
+#       'average' => {
+#         'dimension_1' => xxx,
+#         'dimension_2' => xxx,
+#         'dimension_3' => xxx
+#       },
+#       'standard_deviation' => {
+#         'dimension_1' => xxx,
+#         'dimension_2' => xxx,
+#         'dimension_3' => xxx
+#       },
+#       'change' => {
+#         'dimension_2 vs dimension_1' => xxx,
+#         'dimension_3 vs dimension_1' => xxx,
+#         'dimension_3 vs dimension_2' => xxx
+#       }
+#     },
+#     metric_2 => {...}
+#   }
+# }
+def compare_metrics_values(groups_matrices)
+  metrics_compare_values = {}
+  groups_matrices.each do |group_key, dimensions|
+    metrics_compare_values[group_key] = get_metric_values(dimensions)
+  end
+
+  metrics_compare_values
+end
+
+def get_metric_values(dimensions)
+  metrics_values = {}
+  dimensions.each do |dim, matrix|
+    matrix.each do |metric, values|
+      assign_metric_values(metrics_values, dim, metric, values)
+    end
+  end
+  assign_metric_change(metrics_values)
+
+  metrics_values
+end
+
+def assign_metric_values(metrics_values, dim, metric, values)
+  metrics_values[metric] ||= {}
+  metrics_values[metric]['average'] ||= {}
+  metrics_values[metric]['standard_deviation'] ||= {}
+  metric_value = get_values(values, true)
+  metrics_values[metric]['average'][dim] = metric_value[:average]
+  metrics_values[metric]['standard_deviation'][dim] = metric_value[:stddev]
+end
+
+def assign_metric_change(metrics_values)
+  metrics_values.each do |metric, values|
+    metrics_values[metric]['change'] = {}
+    next if values['average'].size < 2
+
+    dimension_list = values['average'].keys
+    dimension_groups = get_dimensions_combination(dimension_list)
+    dimension_groups.each do |base_dimension, challenge_dimension|
+      change = get_compare_value(values['average'][base_dimension], values['average'][challenge_dimension], true)
+      values['change']["#{challenge_dimension} vs #{base_dimension}"] = change
+    end
+  end
+end
+
+# input: dimension_list
+#   eg: ['openeuler 20.03', 'debian sid', 'centos 7.6']
+# output: Array(base_dimension: String, challenge_dimension: String)
+# [
+#   ['openeuler 20.03', 'debian sid'],
+#   ['openeuler 20.03', 'centos 7.6'],
+#   ['debian sid', 'centos 7.6']
+# ]
+def get_dimensions_combination(dimension_list)
+  dims = []
+  dimension_list_size = dimension_list.size
+  (1..dimension_list_size - 1).each do |i|
+    (i..dimension_list_size - 1).each do |j|
+      dims << [dimension_list[i - 1], dimension_list[j]]
+    end
+  end
+
+  dims
+end
+
+def show_compare_result(metrics_compare_results, template_params)
+  echart_results = format_for_echart(metrics_compare_results, template_params)
+  print JSON.pretty_generate(echart_results)
+end
+
 # Format Fields
 def format_fails_runs(fails, runs)
--
2.23.0
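
The pairwise enumeration behind the 'change' rows is easy to verify in
isolation. A standalone Ruby check of get_dimensions_combination, reproducing
the example from the patch's comment block:

# Every earlier dimension is compared against every later one, in order.
def get_dimensions_combination(dimension_list)
  dims = []
  n = dimension_list.size
  (1..n - 1).each do |i|
    (i..n - 1).each do |j|
      dims << [dimension_list[i - 1], dimension_list[j]]
    end
  end
  dims
end

pairs = get_dimensions_combination(['openeuler 20.03', 'debian sid', 'centos 7.6'])
pairs.each { |base, challenge| puts "#{challenge} vs #{base}" }
# debian sid vs openeuler 20.03
# centos 7.6 vs openeuler 20.03
# centos 7.6 vs debian sid
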
