mirror of
https://github.com/fluencelabs/aquavm
synced 2025-03-15 12:30:50 +00:00
feat(tools): VM-194 performance metering (#440)
* Experimental performance metering
* Average over repeated runs with the `--repeat` option
* Add a "version" field to the report; the version is taken from `air/Cargo.toml`
* Allow disabling binary preparation with the `--no-prepare-binaries` option
* Human-readable execution time in the report
* Add a dashboard benchmark
* Human-readable text report
parent dacd4f074b
commit 5fdc8e68ac
@@ -28,6 +28,7 @@ exclude = [
    "air/tests/test_module/integration/security_tetraplets/auth_module",
    "air/tests/test_module/integration/security_tetraplets/log_storage",
    "crates/interpreter-wasm",
    "junk",
]

[profile.release]
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -24,6 +24,7 @@ use air_interpreter_interface::RunParameters;
use air_log_targets::RUN_PARAMS;
use air_utils::measure;

#[tracing::instrument(skip_all)]
pub fn execute_air(
    air: String,
    prev_data: Vec<u8>,
301 benches/PERFORMANCE.json Normal file
@ -0,0 +1,301 @@
|
||||
{
|
||||
"d77ebe8481884bc3b2778c8083f1bf459e548e929edd87041beb14f6b868d35f": {
|
||||
"benches": {
|
||||
"big_values_data": {
|
||||
"comment": "Loading a trace with huge values",
|
||||
"stats": {
|
||||
"air::runner::execute_air": {
|
||||
"common_prefix": "air",
|
||||
"duration": "1.20s",
|
||||
"nested": {
|
||||
"farewell_step::outcome::from_success_result": {
|
||||
"common_prefix": "air::farewell_step::outcome",
|
||||
"duration": "104.61ms",
|
||||
"nested": {
|
||||
"populate_outcome_from_contexts": {
|
||||
"common_prefix": "air::farewell_step::outcome::serde_json",
|
||||
"duration": "102.47ms",
|
||||
"nested": {
|
||||
"to_vec(call_results)": "142.00µs",
|
||||
"to_vec(data)": "96.71ms"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"preparation_step::preparation::prepare": {
|
||||
"common_prefix": "",
|
||||
"duration": "1.09s",
|
||||
"nested": {
|
||||
"air::preparation_step::preparation::make_exec_ctx": "2.79ms",
|
||||
"air_interpreter_data::interpreter_data::serde_json::from_slice": "1.07s",
|
||||
"air_parser::parser::air_parser::parse": "3.66ms"
|
||||
}
|
||||
},
|
||||
"runner::execute": "262.00µs"
|
||||
}
|
||||
}
|
||||
},
|
||||
"total_time": "1.20s"
|
||||
},
|
||||
"dashboard": {
|
||||
"comment": "big dashboard test",
|
||||
"stats": {
|
||||
"air::runner::execute_air": {
|
||||
"common_prefix": "air",
|
||||
"duration": "187.90ms",
|
||||
"nested": {
|
||||
"farewell_step::outcome::from_success_result": {
|
||||
"common_prefix": "air::farewell_step::outcome",
|
||||
"duration": "25.78ms",
|
||||
"nested": {
|
||||
"populate_outcome_from_contexts": {
|
||||
"common_prefix": "air::farewell_step::outcome::serde_json",
|
||||
"duration": "24.63ms",
|
||||
"nested": {
|
||||
"to_vec(call_results)": "453.00µs",
|
||||
"to_vec(data)": "20.87ms"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"preparation_step::preparation::prepare": {
|
||||
"common_prefix": "",
|
||||
"duration": "46.88ms",
|
||||
"nested": {
|
||||
"air::preparation_step::preparation::make_exec_ctx": "3.15ms",
|
||||
"air_interpreter_data::interpreter_data::serde_json::from_slice": "26.69ms",
|
||||
"air_parser::parser::air_parser::parse": "12.49ms"
|
||||
}
|
||||
},
|
||||
"runner::execute": {
|
||||
"common_prefix": "air::execution_step::instructions::call",
|
||||
"duration": "109.00ms",
|
||||
"nested": {
|
||||
"execute": {
|
||||
"common_prefix": "air::execution_step::instructions::call::resolved_call",
|
||||
"duration": "90.78ms",
|
||||
"nested": {
|
||||
"execute": {
|
||||
"common_prefix": "air::execution_step",
|
||||
"duration": "14.54ms",
|
||||
"nested": {
|
||||
"instructions::call::resolved_call::prepare_request_params": {
|
||||
"common_prefix": "air::execution_step",
|
||||
"duration": "2.61ms",
|
||||
"nested": {
|
||||
"instructions::call::resolved_call::serde_json::to_string(tetraplets)": "621.00µs",
|
||||
"resolver::resolve::resolve_ast_variable": {
|
||||
"common_prefix": "air::execution_step::resolver::resolve",
|
||||
"duration": "430.00µs",
|
||||
"nested": {
|
||||
"resolve_variable": "140.00µs"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"resolver::resolve::resolve_ast_variable": {
|
||||
"common_prefix": "air::execution_step::resolver::resolve",
|
||||
"duration": "1.31ms",
|
||||
"nested": {
|
||||
"resolve_variable": "355.00µs"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"new": {
|
||||
"common_prefix": "air::execution_step::resolver::resolve",
|
||||
"duration": "54.94ms",
|
||||
"nested": {
|
||||
"resolve_ast_scalar": {
|
||||
"common_prefix": "air::execution_step::resolver::resolve",
|
||||
"duration": "40.19ms",
|
||||
"nested": {
|
||||
"resolve_ast_variable": {
|
||||
"common_prefix": "air::execution_step::resolver::resolve",
|
||||
"duration": "25.15ms",
|
||||
"nested": {
|
||||
"resolve_variable": "8.23ms"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"total_time": "187.90ms"
|
||||
},
|
||||
"long_data": {
|
||||
"comment": "Long data trace",
|
||||
"stats": {
|
||||
"air::runner::execute_air": {
|
||||
"common_prefix": "air",
|
||||
"duration": "344.40ms",
|
||||
"nested": {
|
||||
"farewell_step::outcome::from_success_result": {
|
||||
"common_prefix": "air::farewell_step::outcome",
|
||||
"duration": "29.24ms",
|
||||
"nested": {
|
||||
"populate_outcome_from_contexts": {
|
||||
"common_prefix": "air::farewell_step::outcome::serde_json",
|
||||
"duration": "27.04ms",
|
||||
"nested": {
|
||||
"to_vec(call_results)": "142.00µs",
|
||||
"to_vec(data)": "22.00ms"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"preparation_step::preparation::prepare": {
|
||||
"common_prefix": "",
|
||||
"duration": "308.90ms",
|
||||
"nested": {
|
||||
"air::preparation_step::preparation::make_exec_ctx": "3.27ms",
|
||||
"air_interpreter_data::interpreter_data::serde_json::from_slice": "297.10ms",
|
||||
"air_parser::parser::air_parser::parse": "3.65ms"
|
||||
}
|
||||
},
|
||||
"runner::execute": "261.00µs"
|
||||
}
|
||||
}
|
||||
},
|
||||
"total_time": "344.40ms"
|
||||
},
|
||||
"network_explore": {
|
||||
"comment": "N peers of network are discovered",
|
||||
"stats": {
|
||||
"air::runner::execute_air": {
|
||||
"common_prefix": "air",
|
||||
"duration": "80.18ms",
|
||||
"nested": {
|
||||
"farewell_step::outcome::from_success_result": {
|
||||
"common_prefix": "air::farewell_step::outcome",
|
||||
"duration": "10.68ms",
|
||||
"nested": {
|
||||
"populate_outcome_from_contexts": {
|
||||
"common_prefix": "air::farewell_step::outcome::serde_json",
|
||||
"duration": "9.30ms",
|
||||
"nested": {
|
||||
"to_vec(call_results)": "138.00µs",
|
||||
"to_vec(data)": "4.76ms"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"preparation_step::preparation::prepare": {
|
||||
"common_prefix": "",
|
||||
"duration": "34.07ms",
|
||||
"nested": {
|
||||
"air::preparation_step::preparation::make_exec_ctx": "4.15ms",
|
||||
"air_interpreter_data::interpreter_data::serde_json::from_slice": "11.86ms",
|
||||
"air_parser::parser::air_parser::parse": "13.67ms"
|
||||
}
|
||||
},
|
||||
"runner::execute": {
|
||||
"common_prefix": "air::execution_step::instructions::call",
|
||||
"duration": "29.04ms",
|
||||
"nested": {
|
||||
"execute": {
|
||||
"common_prefix": "air::execution_step::instructions::call::resolved_call",
|
||||
"duration": "15.34ms",
|
||||
"nested": {
|
||||
"execute": {
|
||||
"common_prefix": "air::execution_step::resolver::resolve",
|
||||
"duration": "5.12ms",
|
||||
"nested": {
|
||||
"resolve_ast_variable": {
|
||||
"common_prefix": "air::execution_step::resolver::resolve",
|
||||
"duration": "392.00µs",
|
||||
"nested": {
|
||||
"resolve_variable": "126.00µs"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"new": {
|
||||
"common_prefix": "air::execution_step::resolver::resolve",
|
||||
"duration": "6.34ms",
|
||||
"nested": {
|
||||
"resolve_ast_scalar": {
|
||||
"common_prefix": "air::execution_step::resolver::resolve",
|
||||
"duration": "3.88ms",
|
||||
"nested": {
|
||||
"resolve_ast_variable": {
|
||||
"common_prefix": "air::execution_step::resolver::resolve",
|
||||
"duration": "2.67ms",
|
||||
"nested": {
|
||||
"resolve_variable": "907.00µs"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"total_time": "80.18ms"
|
||||
},
|
||||
"parser_10000_100": {
|
||||
"comment": "Running very long AIR script with lot of variables and assignments",
|
||||
"stats": {
|
||||
"air::runner::execute_air": {
|
||||
"common_prefix": "air",
|
||||
"duration": "1.34s",
|
||||
"nested": {
|
||||
"farewell_step::outcome::from_success_result": {
|
||||
"common_prefix": "air::farewell_step::outcome",
|
||||
"duration": "7.32ms",
|
||||
"nested": {
|
||||
"populate_outcome_from_contexts": {
|
||||
"common_prefix": "air::farewell_step::outcome::serde_json",
|
||||
"duration": "6.24ms",
|
||||
"nested": {
|
||||
"to_vec(call_results)": "139.00µs",
|
||||
"to_vec(data)": "2.89ms"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"preparation_step::preparation::prepare": {
|
||||
"common_prefix": "",
|
||||
"duration": "1.31s",
|
||||
"nested": {
|
||||
"air::preparation_step::preparation::make_exec_ctx": "2.82ms",
|
||||
"air_parser::parser::air_parser::parse": "1.31s"
|
||||
}
|
||||
},
|
||||
"runner::execute": {
|
||||
"common_prefix": "air::execution_step::instructions::call",
|
||||
"duration": "7.53ms",
|
||||
"nested": {
|
||||
"execute": {
|
||||
"common_prefix": "air::execution_step::instructions::call::resolved_call",
|
||||
"duration": "4.54ms",
|
||||
"nested": {
|
||||
"execute": "619.00µs",
|
||||
"new": "1.27ms"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"total_time": "1.34s"
|
||||
}
|
||||
},
|
||||
"datetime": "2023-02-03 12:08:43.454860+00:00",
|
||||
"platform": "macOS-13.1-arm64-arm-64bit",
|
||||
"version": "0.35.0"
|
||||
}
|
||||
}
|
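The report above is keyed by a host-id hash; each entry holds `benches` (per-bench `stats` and `total_time`) plus `datetime`, `platform`, and `version`. As a minimal illustration of consuming this file (not part of this change; the path is the repository default), a sketch:

```python
"""Sketch only: summarize benches/PERFORMANCE.json."""
import json

with open("benches/PERFORMANCE.json", encoding="utf-8") as inp:
    report = json.load(inp)

for host_id, host in report.items():
    print(f"host {host_id[:12]} ({host['platform']}, AquaVM {host['version']})")
    for name, bench in host["benches"].items():
        # Each bench entry carries a human-readable total time and nested stats.
        print(f"  {name}: {bench['total_time']} - {bench.get('comment', '')}")
```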
82 benches/PERFORMANCE.txt Normal file
@ -0,0 +1,82 @@
|
||||
Machine d77ebe8481884bc3b2778c8083f1bf459e548e929edd87041beb14f6b868d35f:
|
||||
Platform: macOS-13.1-arm64-arm-64bit
|
||||
Timestamp: 2023-02-03 12:08:43.454860+00:00
|
||||
AquaVM version: 0.35.0
|
||||
Benches:
|
||||
big_values_data (1.20s): Loading a trace with huge values
|
||||
air::runner::execute_air: 1.20s
|
||||
preparation_step::preparation::prepare: 1.09s
|
||||
air_interpreter_data::interpreter_data::serde_json::from_slice: 1.07s
|
||||
air_parser::parser::air_parser::parse: 3.66ms
|
||||
air::preparation_step::preparation::make_exec_ctx: 2.79ms
|
||||
runner::execute: 262.00µs
|
||||
farewell_step::outcome::from_success_result: 104.61ms
|
||||
populate_outcome_from_contexts: 102.47ms
|
||||
to_vec(data): 96.71ms
|
||||
to_vec(call_results): 142.00µs
|
||||
dashboard (187.90ms): big dashboard test
|
||||
air::runner::execute_air: 187.90ms
|
||||
preparation_step::preparation::prepare: 46.88ms
|
||||
air_interpreter_data::interpreter_data::serde_json::from_slice: 26.69ms
|
||||
air_parser::parser::air_parser::parse: 12.49ms
|
||||
air::preparation_step::preparation::make_exec_ctx: 3.15ms
|
||||
runner::execute: 109.00ms
|
||||
execute: 90.78ms
|
||||
new: 54.94ms
|
||||
resolve_ast_scalar: 40.19ms
|
||||
resolve_ast_variable: 25.15ms
|
||||
resolve_variable: 8.23ms
|
||||
execute: 14.54ms
|
||||
resolver::resolve::resolve_ast_variable: 1.31ms
|
||||
resolve_variable: 355.00µs
|
||||
instructions::call::resolved_call::prepare_request_params: 2.61ms
|
||||
resolver::resolve::resolve_ast_variable: 430.00µs
|
||||
resolve_variable: 140.00µs
|
||||
instructions::call::resolved_call::serde_json::to_string(tetraplets): 621.00µs
|
||||
farewell_step::outcome::from_success_result: 25.78ms
|
||||
populate_outcome_from_contexts: 24.63ms
|
||||
to_vec(data): 20.87ms
|
||||
to_vec(call_results): 453.00µs
|
||||
long_data (344.40ms): Long data trace
|
||||
air::runner::execute_air: 344.40ms
|
||||
preparation_step::preparation::prepare: 308.90ms
|
||||
air_interpreter_data::interpreter_data::serde_json::from_slice: 297.10ms
|
||||
air_parser::parser::air_parser::parse: 3.65ms
|
||||
air::preparation_step::preparation::make_exec_ctx: 3.27ms
|
||||
runner::execute: 261.00µs
|
||||
farewell_step::outcome::from_success_result: 29.24ms
|
||||
populate_outcome_from_contexts: 27.04ms
|
||||
to_vec(data): 22.00ms
|
||||
to_vec(call_results): 142.00µs
|
||||
network_explore (80.18ms): N peers of network are discovered
|
||||
air::runner::execute_air: 80.18ms
|
||||
preparation_step::preparation::prepare: 34.07ms
|
||||
air_interpreter_data::interpreter_data::serde_json::from_slice: 11.86ms
|
||||
air_parser::parser::air_parser::parse: 13.67ms
|
||||
air::preparation_step::preparation::make_exec_ctx: 4.15ms
|
||||
runner::execute: 29.04ms
|
||||
execute: 15.34ms
|
||||
new: 6.34ms
|
||||
resolve_ast_scalar: 3.88ms
|
||||
resolve_ast_variable: 2.67ms
|
||||
resolve_variable: 907.00µs
|
||||
execute: 5.12ms
|
||||
resolve_ast_variable: 392.00µs
|
||||
resolve_variable: 126.00µs
|
||||
farewell_step::outcome::from_success_result: 10.68ms
|
||||
populate_outcome_from_contexts: 9.30ms
|
||||
to_vec(data): 4.76ms
|
||||
to_vec(call_results): 138.00µs
|
||||
parser_10000_100 (1.34s): Running very long AIR script with lot of variables and assignments
|
||||
air::runner::execute_air: 1.34s
|
||||
preparation_step::preparation::prepare: 1.31s
|
||||
air_parser::parser::air_parser::parse: 1.31s
|
||||
air::preparation_step::preparation::make_exec_ctx: 2.82ms
|
||||
runner::execute: 7.53ms
|
||||
execute: 4.54ms
|
||||
new: 1.27ms
|
||||
execute: 619.00µs
|
||||
farewell_step::outcome::from_success_result: 7.32ms
|
||||
populate_outcome_from_contexts: 6.24ms
|
||||
to_vec(data): 2.89ms
|
||||
to_vec(call_results): 139.00µs
|
3 benches/performance_metering/big_values_data/params.json Normal file
@@ -0,0 +1,3 @@
{
    "comment": "Loading a trace with huge values"
}
File diff suppressed because one or more lines are too long
1 benches/performance_metering/big_values_data/script.air Normal file
@@ -0,0 +1 @@
(null)
1 benches/performance_metering/dashboard/cur_data.json Normal file
File diff suppressed because one or more lines are too long
3 benches/performance_metering/dashboard/params.json Normal file
@@ -0,0 +1,3 @@
{
    "comment": "big dashboard test"
}
1 benches/performance_metering/dashboard/prev_data.json Normal file
File diff suppressed because one or more lines are too long
95 benches/performance_metering/dashboard/script.air Normal file
@ -0,0 +1,95 @@
|
||||
(seq
|
||||
(call %init_peer_id% ("" "load") ["relayId"] relayId)
|
||||
(seq
|
||||
(call %init_peer_id% ("" "load") ["knownPeers"] knownPeers)
|
||||
(seq
|
||||
(call %init_peer_id% ("" "load") ["clientId"] clientId)
|
||||
; get info from relay
|
||||
(par
|
||||
(seq
|
||||
(call relayId ("op" "identity") [])
|
||||
(seq
|
||||
(call relayId ("op" "identify") [] ident)
|
||||
(seq
|
||||
(call relayId ("dist" "get_blueprints") [] blueprints)
|
||||
(seq
|
||||
(call relayId ("dist" "get_modules") [] modules)
|
||||
(seq
|
||||
(call relayId ("srv" "get_interfaces") [] interfaces)
|
||||
(seq
|
||||
(call relayId ("op" "identity") [])
|
||||
(call %init_peer_id% ("event" "all_info") [relayId ident interfaces blueprints modules])
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
(par
|
||||
; iterate over known peers and get their info
|
||||
(fold knownPeers p
|
||||
(par
|
||||
(seq
|
||||
(call p ("op" "identity") [])
|
||||
(seq
|
||||
(call p ("op" "identify") [] ident)
|
||||
(seq
|
||||
(call p ("dist" "get_blueprints") [] blueprints)
|
||||
(seq
|
||||
(call p ("dist" "get_modules") [] modules)
|
||||
(seq
|
||||
(call p ("srv" "get_interfaces") [] interfaces)
|
||||
(seq
|
||||
(call relayId ("op" "identity") [])
|
||||
(call %init_peer_id% ("event" "all_info") [p ident interfaces blueprints modules])
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
(next p)
|
||||
)
|
||||
)
|
||||
; call on relay neighborhood
|
||||
(seq
|
||||
(call relayId ("op" "identity") [])
|
||||
(seq
|
||||
(call relayId ("dht" "neighborhood") [clientId] neigh)
|
||||
(fold neigh n
|
||||
; call neighborhood on every relays' neighbours
|
||||
(par
|
||||
(seq
|
||||
(call n ("dht" "neighborhood") [clientId] moreNeigh)
|
||||
(fold moreNeigh mp
|
||||
(par
|
||||
(seq
|
||||
(call mp ("op" "identify") [] ident)
|
||||
(seq
|
||||
(call mp ("dist" "get_blueprints") [] blueprints)
|
||||
(seq
|
||||
(call mp ("dist" "get_modules") [] modules)
|
||||
(seq
|
||||
(call mp ("srv" "get_interfaces") [] interfaces)
|
||||
(seq
|
||||
(call relayId ("op" "identity") [])
|
||||
(call %init_peer_id% ("event" "all_info") [mp ident interfaces blueprints modules])
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
(next mp)
|
||||
)
|
||||
)
|
||||
)
|
||||
(next n)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
1 benches/performance_metering/long_data/cur_data.json Normal file
File diff suppressed because one or more lines are too long
3 benches/performance_metering/long_data/params.json Normal file
@@ -0,0 +1,3 @@
{
    "comment": "Long data trace"
}
1 benches/performance_metering/long_data/script.air Normal file
@@ -0,0 +1 @@
(null)
@@ -0,0 +1 @@
{"trace":[{"call":{"executed":{"scalar":"bagaaiera4zxm56wtt5v52gaipgkmrfmdsah5cyogg4drypagaxir2edqgybq"}}},{"call":{"executed":{"scalar":"bagaaierallnufmo36pjkpszonpfnf6u75dqjenrjpbh2a46wlyc5e7ajibpa"}}},{"call":{"executed":{"scalar":"bagaaierapde2fns225s3yx6s7dfzgtd6t4vgc4mvakcepixld6uytrufv4ba"}}},{"call":{"executed":{"stream":{"cid":"bagaaierakeyeajayshf6ht5wzz6n35stf3w2tnwxvutef726w4eut3topvwq","generation":0}}}},{"call":{"executed":{"stream":{"cid":"bagaaieras6qfexd6riwalgcnvuabg3usf77jdueudietwocvce47turjnwpa","generation":1}}}},{"call":{"executed":{"stream":{"cid":"bagaaieras6qfexd6riwalgcnvuabg3usf77jdueudietwocvce47turjnwpa","generation":2}}}},{"call":{"executed":{"stream":{"cid":"bagaaierapde2fns225s3yx6s7dfzgtd6t4vgc4mvakcepixld6uytrufv4ba","generation":3}}}},{"fold":{"lore":[{"pos":3,"desc":[{"pos":8,"len":4},{"pos":12,"len":0}]},{"pos":4,"desc":[{"pos":12,"len":3},{"pos":15,"len":0}]},{"pos":5,"desc":[{"pos":15,"len":3},{"pos":18,"len":0}]},{"pos":6,"desc":[{"pos":18,"len":4},{"pos":22,"len":0}]}]}},{"call":{"executed":{"stream":{"cid":"bagaaierakeyeajayshf6ht5wzz6n35stf3w2tnwxvutef726w4eut3topvwq","generation":1}}}},{"call":{"executed":{"stream":{"cid":"bagaaieras6qfexd6riwalgcnvuabg3usf77jdueudietwocvce47turjnwpa","generation":2}}}},{"call":{"executed":{"stream":{"cid":"bagaaierapde2fns225s3yx6s7dfzgtd6t4vgc4mvakcepixld6uytrufv4ba","generation":3}}}},{"call":{"executed":{"stream":{"cid":"bagaaieras6qfexd6riwalgcnvuabg3usf77jdueudietwocvce47turjnwpa","generation":4}}}},{"call":{"executed":{"stream":{"cid":"bagaaierapde2fns225s3yx6s7dfzgtd6t4vgc4mvakcepixld6uytrufv4ba","generation":0}}}},{"call":{"executed":{"stream":{"cid":"bagaaieras6qfexd6riwalgcnvuabg3usf77jdueudietwocvce47turjnwpa","generation":2}}}},{"call":{"sent_by":"client_3_id"}},{"call":{"executed":{"stream":{"cid":"bagaaierapde2fns225s3yx6s7dfzgtd6t4vgc4mvakcepixld6uytrufv4ba","generation":0}}}},{"call":{"executed":{"stream":{"cid":"bagaaieras6qfexd6riwalgcnvuabg3usf77jdueudietwocvce47turjnwpa","generation":2}}}},{"call":{"sent_by":"client_3_id"}},{"call":{"executed":{"stream":{"cid":"bagaaierakeyeajayshf6ht5wzz6n35stf3w2tnwxvutef726w4eut3topvwq","generation":1}}}},{"call":{"executed":{"stream":{"cid":"bagaaieras6qfexd6riwalgcnvuabg3usf77jdueudietwocvce47turjnwpa","generation":4}}}},{"call":{"executed":{"stream":{"cid":"bagaaieras6qfexd6riwalgcnvuabg3usf77jdueudietwocvce47turjnwpa","generation":5}}}},{"call":{"sent_by":"client_3_id"}}],"streams":{"$neighs_inner":4,"$services":6},"version":"0.6.0","lcid":5,"r_streams":{},"interpreter_version":"0.35.0","cid_info":{"value_store":{"bagaaierallnufmo36pjkpszonpfnf6u75dqjenrjpbh2a46wlyc5e7ajibpa":"client_id","bagaaieras6qfexd6riwalgcnvuabg3usf77jdueudietwocvce47turjnwpa":["relay_id","client_3_id","client_1_id","client_2_id"],"bagaaierapde2fns225s3yx6s7dfzgtd6t4vgc4mvakcepixld6uytrufv4ba":["client_1_id","client_2_id","client_3_id","relay_id"],"bagaaiera4zxm56wtt5v52gaipgkmrfmdsah5cyogg4drypagaxir2edqgybq":"relay_id","bagaaierakeyeajayshf6ht5wzz6n35stf3w2tnwxvutef726w4eut3topvwq":["client_1_id","client_3_id","relay_id","client_2_id"]},"tetraplet_store":{},"canon_store":{}}}
3 benches/performance_metering/network_explore/params.json Normal file
@@ -0,0 +1,3 @@
{
    "comment": "5 peers of network are discovered"
}
@@ -0,0 +1 @@
{"trace":[{"call":{"executed":{"scalar":"bagaaiera4zxm56wtt5v52gaipgkmrfmdsah5cyogg4drypagaxir2edqgybq"}}},{"call":{"executed":{"scalar":"bagaaierallnufmo36pjkpszonpfnf6u75dqjenrjpbh2a46wlyc5e7ajibpa"}}},{"call":{"executed":{"scalar":"bagaaierapde2fns225s3yx6s7dfzgtd6t4vgc4mvakcepixld6uytrufv4ba"}}},{"call":{"executed":{"stream":{"cid":"bagaaierakeyeajayshf6ht5wzz6n35stf3w2tnwxvutef726w4eut3topvwq","generation":0}}}},{"call":{"executed":{"stream":{"cid":"bagaaieras6qfexd6riwalgcnvuabg3usf77jdueudietwocvce47turjnwpa","generation":1}}}},{"call":{"executed":{"stream":{"cid":"bagaaieras6qfexd6riwalgcnvuabg3usf77jdueudietwocvce47turjnwpa","generation":2}}}},{"call":{"executed":{"stream":{"cid":"bagaaierapde2fns225s3yx6s7dfzgtd6t4vgc4mvakcepixld6uytrufv4ba","generation":3}}}},{"fold":{"lore":[{"pos":3,"desc":[{"pos":8,"len":4},{"pos":12,"len":0}]},{"pos":4,"desc":[{"pos":12,"len":3},{"pos":15,"len":0}]},{"pos":5,"desc":[{"pos":15,"len":3},{"pos":18,"len":0}]},{"pos":6,"desc":[{"pos":18,"len":2},{"pos":20,"len":0}]}]}},{"call":{"executed":{"stream":{"cid":"bagaaierakeyeajayshf6ht5wzz6n35stf3w2tnwxvutef726w4eut3topvwq","generation":1}}}},{"call":{"executed":{"stream":{"cid":"bagaaieras6qfexd6riwalgcnvuabg3usf77jdueudietwocvce47turjnwpa","generation":2}}}},{"call":{"executed":{"stream":{"cid":"bagaaierapde2fns225s3yx6s7dfzgtd6t4vgc4mvakcepixld6uytrufv4ba","generation":3}}}},{"call":{"sent_by":"relay_id"}},{"call":{"executed":{"stream":{"cid":"bagaaierapde2fns225s3yx6s7dfzgtd6t4vgc4mvakcepixld6uytrufv4ba","generation":0}}}},{"call":{"executed":{"stream":{"cid":"bagaaieras6qfexd6riwalgcnvuabg3usf77jdueudietwocvce47turjnwpa","generation":2}}}},{"call":{"sent_by":"client_3_id"}},{"call":{"executed":{"stream":{"cid":"bagaaierapde2fns225s3yx6s7dfzgtd6t4vgc4mvakcepixld6uytrufv4ba","generation":0}}}},{"call":{"executed":{"stream":{"cid":"bagaaieras6qfexd6riwalgcnvuabg3usf77jdueudietwocvce47turjnwpa","generation":2}}}},{"call":{"sent_by":"client_3_id"}},{"call":{"executed":{"stream":{"cid":"bagaaierakeyeajayshf6ht5wzz6n35stf3w2tnwxvutef726w4eut3topvwq","generation":1}}}},{"call":{"sent_by":"client_1_id"}}],"streams":{"$neighs_inner":4,"$services":4},"version":"0.6.0","lcid":5,"r_streams":{},"interpreter_version":"0.35.0","cid_info":{"value_store":{"bagaaierakeyeajayshf6ht5wzz6n35stf3w2tnwxvutef726w4eut3topvwq":["client_1_id","client_3_id","relay_id","client_2_id"],"bagaaierallnufmo36pjkpszonpfnf6u75dqjenrjpbh2a46wlyc5e7ajibpa":"client_id","bagaaieras6qfexd6riwalgcnvuabg3usf77jdueudietwocvce47turjnwpa":["relay_id","client_3_id","client_1_id","client_2_id"],"bagaaierapde2fns225s3yx6s7dfzgtd6t4vgc4mvakcepixld6uytrufv4ba":["client_1_id","client_2_id","client_3_id","relay_id"],"bagaaiera4zxm56wtt5v52gaipgkmrfmdsah5cyogg4drypagaxir2edqgybq":"relay_id"},"tetraplet_store":{},"canon_store":{}}}
27 benches/performance_metering/network_explore/script.air Normal file
@@ -0,0 +1,27 @@
(seq
 (seq
  (seq
   (call "client_id" ("" "") ["relay"] relay)
   (call "client_id" ("" "") ["client"] client))
  (seq
   (call relay ("dht" "neighborhood") [relay] neighs_top) ;
   (seq
    (fold neighs_top n
     (seq
      (call n ("dht" "neighborhood") [n] $neighs_inner)
      (next n)))
    (fold $neighs_inner ns
     (seq
      (fold ns n
       (seq
        (call n ("op" "identify") [] $services)
        (next n)))
      (next ns))))))
 (seq
  (call relay ("op" "identity") [])

  (seq (seq
   (canon client $services #services)
   (canon client $neighs_inner #neighs_inner)
  )
  (call client ("return" "") [#services #neighs_inner neighs_top]) )))
@@ -0,0 +1,3 @@
{
    "comment": "Running very long AIR script with lot of variables and assignments"
}
1 benches/performance_metering/parser_10000_100/script.air Normal file
File diff suppressed because one or more lines are too long
10 junk/cidify/Cargo.toml Normal file
@@ -0,0 +1,10 @@
[package]
name = "cidify"
version = "0.1.0"
edition = "2021"

[dependencies]
air-interpreter-cid = { version = "0.2.0", path = "../../crates/air-lib/interpreter-cid" }
air-interpreter-data = { version = "0.6.0", path = "../../crates/air-lib/interpreter-data" }
serde = { version = "1.0.152", features = ["derive"]}
serde_json = "1.0.91"
41 junk/cidify/src/main.rs Normal file
@@ -0,0 +1,41 @@
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};

#[derive(Deserialize, Debug, Serialize)]
pub struct PreCidInterpreterData {
    trace: Vec<serde_json::Value>,

    #[serde(flatten)]
    other_fields: serde_json::Value,
}

fn main() {
    let stdin = std::io::stdin();
    let mut data: PreCidInterpreterData =
        serde_json::from_reader(stdin).expect("Expect to be readable");
    let mut values = air_interpreter_data::CidTracker::<Value>::new();
    // Replace every executed scalar value in the trace with its CID.
    for elt in &mut data.trace {
        let obj = elt.as_object_mut().unwrap();
        if let Some(call) = obj.get_mut("call") {
            if let Some(executed) = call.as_object_mut().unwrap().get_mut("executed") {
                if let Some(scalar) = executed.as_object_mut().unwrap().get_mut("scalar") {
                    let cid = values.record_value(scalar.clone()).expect("Expected to compute a CID");
                    *scalar = json!(cid);
                }
            }
        }
    }
    data.other_fields.as_object_mut().unwrap().insert(
        "cid_info".to_owned(),
        json!({
            "value_store": Into::<air_interpreter_data::CidStore<_>>::into(values),
            "tetraplet_store": {},
            "canon_store": {},
        }),
    );
    data.other_fields
        .as_object_mut()
        .unwrap()
        .insert("interpreter_version".to_owned(), json!("0.35.1"));
    serde_json::to_writer(std::io::stdout(), &data).unwrap();
}
21 tools/cli/performance_metering/README.md Normal file
@@ -0,0 +1,21 @@
# The `aquavm_performance_metering` utility

Executes a special AquaVM benchmarking suite and records the results, together with some meta information, in the `benches/PERFORMANCE.json` database.

This script is intended to be run from the project root. It runs `air-trace` through `cargo`, so no separate installation of `air-trace` is required.

# Installation

In the project root, run:

``` sh
pip install tools/cli/performance_metering

```

# Usage
In the project root, run:

``` sh
aquavm_performance_metering run
```

You may also pass the `--repeat N` option to average the results over multiple runs.
@@ -0,0 +1,15 @@
#
# Copyright 2023 Fluence Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
125 tools/cli/performance_metering/performance_metering/bench.py Normal file
@@ -0,0 +1,125 @@
#
# Copyright 2023 Fluence Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A bench module."""
import json
import logging
import os.path
import subprocess
from typing import Optional

logger = logging.getLogger(__name__)


class _Params:
    comment: Optional[str]
    args: dict

    def __init__(self, comment, args):
        self.comment = comment
        self.args = args

    @staticmethod
    def _load_params(bench_path):
        try:
            params_path = os.path.join(bench_path, "params.json")
            with open(params_path, 'r', encoding="utf8") as inp:
                data = json.load(inp)
                comment = data.pop('comment')
                return _Params(comment, data)
        except IOError:
            return _Params(None, {})


class Bench:
    """Single bench consisting of `air-trace run` parameters."""

    path: str
    params: _Params
    prev_data_path: str
    cur_data_path: str
    air_script_path: str
    native: bool

    def __init__(self, bench_path: str, native: bool = False):
        """Load data."""
        self.path = bench_path

        self.params = _Params._load_params(bench_path)
        self.prev_data_path = discover_file(bench_path, "prev_data.json")
        self.cur_data_path = discover_file(bench_path, "cur_data.json")
        self.air_script_path = discover_file(bench_path, "script.air")
        self.native = native

    def run(self, repeat, tracing_params):
        """Run the bench, storing and parsing its output."""
        logger.info("Executing %s...", self.get_name())
        return self._execute(repeat, tracing_params)

    def _execute(self, repeat, tracing_params) -> list:
        all_output = []
        for _ in range(repeat):
            # Run `air-trace run` through cargo; any extra bench params
            # from params.json are passed as additional CLI options.
            proc = subprocess.run(
                [
                    "cargo", "run",
                    "--quiet",
                    "--release",
                    "--package", "air-trace",
                    "--",
                    "run",
                    "--json",
                    "--repeat", "1",
                ] + (
                    ["--native"] if self.native else []
                ) + [
                    "--tracing-params", tracing_params,
                    "--plain",
                    "--data", self.cur_data_path,
                    "--prev-data", self.prev_data_path,
                    "--script", self.air_script_path,
                ] + [
                    arg
                    for (param, val) in self.params.args.items()
                    for arg in ('--' + param, val)
                ],
                check=True,
                stderr=subprocess.PIPE,
            )
            lines = proc.stderr.decode("utf-8").split('\n')
            all_output.extend(lines)
        return list(map(json.loads, filter(lambda x: x, all_output)))

    def get_name(self):
        """Return the bench name."""
        return os.path.basename(self.path)

    def get_comment(self):
        """Return the bench comment."""
        return self.params.comment

    def __repr__(self):
        """`repr` implementation."""
        return "Bench({!r}, {!r})".format(
            os.path.basename(self.path),
            self.params
        )


def discover_file(base_dir: str, filename: str) -> str:
    """Return the file in the base_dir, checking it can be read."""
    path = os.path.join(base_dir, filename)
    with open(path, 'r', encoding="utf8"):
        pass
    return path
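For illustration, a hedged sketch of how `Bench` is meant to be driven (the directory is one of the benches added in this commit; note that `run()` really shells out to `cargo run --package air-trace` as shown above and returns the parsed JSON trace events):

```python
# Sketch only: assumes the package is installed and the command is run
# from the project root, where the bench directories of this commit live.
from performance_metering.bench import Bench

bench = Bench("benches/performance_metering/network_explore")
records = bench.run(repeat=1, tracing_params="trace")
print(bench.get_name(), bench.get_comment(), len(records), "trace events")
```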
106 tools/cli/performance_metering/performance_metering/db.py Normal file
@@ -0,0 +1,106 @@
#
# Copyright 2023 Fluence Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Performance measurement database module."""

import datetime
import json
import logging
import platform
from typing import Optional

from .helpers import get_host_id, get_aquavm_version, intermediate_temp_file


DEFAULT_JSON_PATH = "benches/PERFORMANCE.json"
DEFAULT_TEXT_PATH = "benches/PERFORMANCE.yaml"
AQUAVM_TOML_PATH = "air/Cargo.toml"


class Db:
    """Performance measurement database."""

    json_path: str
    text_path: str
    host_id: str
    data: dict

    def __init__(
            self,
            json_path: Optional[str],
            text_path: Optional[str],
            host_id=None
    ):
        """Load data from the file, if it exists."""
        if json_path is None:
            json_path = DEFAULT_JSON_PATH
        self.json_path = json_path

        if text_path is None:
            text_path = DEFAULT_TEXT_PATH
        self.text_path = text_path

        if host_id is None:
            host_id = get_host_id()
        self.host_id = host_id

        try:
            with open(json_path, 'r', encoding="utf-8") as inp:
                self.data = json.load(inp)
        except IOError as ex:
            logging.warning("cannot open data at %r: %s", json_path, ex)
            self.data = {}

    def record(self, bench, stats, total_time):
        """Record the bench stats."""
        if self.host_id not in self.data:
            self.data[self.host_id] = {"benches": {}}
        bench_name = bench.get_name()

        self.data[self.host_id]["benches"][bench_name] = {
            "stats": stats,
            "total_time": total_time,
        }
        self.data[self.host_id]["platform"] = platform.platform()
        self.data[self.host_id]["datetime"] = str(
            datetime.datetime.now(datetime.timezone.utc)
        )
        self.data[self.host_id]["version"] = get_aquavm_version(
            AQUAVM_TOML_PATH
        )

        comment = bench.get_comment()
        if comment is not None:
            self.data[self.host_id]["benches"][bench_name]["comment"] = comment

    def save(self):
        """Save the database to JSON."""
        with intermediate_temp_file(self.json_path) as out:
            json.dump(
                self.data, out,
                # for better diffs and readable files:
                sort_keys=True,
                indent=2,
                ensure_ascii=False,
            )
            # Add a new line for data readability
            print("", file=out)

    def __enter__(self):
        """Enter context manager."""
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Exit context manager, saving data if the exit is clean."""
        if exc_type is None:
            self.save()
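A small usage sketch of `Db` (illustrative only; the stats and total time mimic the structure stored above): it loads the existing JSON database if present, records one bench's stats, and rewrites the file atomically on clean exit via `intermediate_temp_file`.

```python
# Illustrative only: running this would overwrite benches/PERFORMANCE.json.
from performance_metering.bench import Bench
from performance_metering.db import Db

bench = Bench("benches/performance_metering/network_explore")
with Db(None, None) as db:  # default paths under benches/
    db.record(bench, stats={"runner::execute": "29.04ms"}, total_time="80.18ms")
# Leaving the `with` block without an exception calls db.save().
```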
@@ -0,0 +1,92 @@
#
# Copyright 2023 Fluence Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Helper functions for performance_metering."""
import datetime
import hashlib
import os.path
import socket
import tempfile
from contextlib import contextmanager
from typing import Optional

import toml

# The ordering of elements is important.
TIME_SUFFIXES = [("ns", 1e-9), ("µs", 1e-6), ("ms", 1e-3), ("s", 1e0)]


def parse_trace_timedelta(inp: Optional[str]) -> datetime.timedelta:
    """Parse `tracing`-formatted execution times."""
    if inp is None:
        return datetime.timedelta()
    for (suffix, scale) in TIME_SUFFIXES:
        if inp.endswith(suffix):
            val = float(inp[:-len(suffix)])
            seconds = val * scale
            return datetime.timedelta(seconds=seconds)
    raise ValueError("Unknown time suffix")


def format_timedelta(td: datetime.timedelta) -> str:
    """Print execution times in `tracing` format."""
    seconds = td.total_seconds()
    for (suffix, scale) in reversed(TIME_SUFFIXES):
        if seconds >= scale:
            return f"{seconds / scale:0.2f}{suffix}"
    # else: fall back to the largest suffix (handles zero durations)
    (suffix, scale) = TIME_SUFFIXES[-1]
    return f"{seconds / scale:0.2f}{suffix}"


def get_host_id() -> str:
    """Return a hash of the host id."""
    hostname = socket.gethostname().encode('utf-8')
    return hashlib.sha256(hostname).hexdigest()


def get_aquavm_version(path: str) -> str:
    """Get the `version` field from a TOML file."""
    with open(path, 'r', encoding="utf8") as inp:
        data = toml.load(inp)
    return data['package']['version']


@contextmanager
def intermediate_temp_file(target_file: str):
    """
    Context manager that creates an intermediate temp file.

    It is to be used as an intermediate for overwriting the target file
    on success.
    """
    out = tempfile.NamedTemporaryFile(
        mode="w+",
        dir=os.path.dirname(target_file),
        prefix=os.path.basename(target_file) + ".",
        encoding="utf-8",
        delete=False,
    )
    try:
        yield out
        out.flush()
        os.rename(out.name, target_file)
    except:  # noqa: E722 -- clean up the temp file, then re-raise
        out.close()
        try:
            os.remove(out.name)
        except OSError:
            pass
        raise
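The two time helpers are inverses up to rounding; a quick round-trip sketch:

```python
from datetime import timedelta

from performance_metering.helpers import format_timedelta, parse_trace_timedelta

td = parse_trace_timedelta("104.61ms")  # -> timedelta of 104610 microseconds
assert abs(td - timedelta(milliseconds=104.61)) < timedelta(microseconds=1)
print(format_timedelta(td))                      # 104.61ms
print(format_timedelta(timedelta(seconds=1.2)))  # 1.20s
```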
54 tools/cli/performance_metering/performance_metering/main.py Normal file
@@ -0,0 +1,54 @@
#!/usr/bin/env python3
#
# Copyright 2023 Fluence Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An AquaVM performance metering tool."""

import argparse
import logging

from . import run


def main():
    """Run main function."""
    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser()
    subp = parser.add_subparsers(dest='command')

    run_subparser = subp.add_parser("run")
    run_subparser.add_argument("--path", required=False, type=str)
    run_subparser.add_argument("--report-path", required=False, type=str)
    run_subparser.add_argument("--host-id", required=False, type=str)
    run_subparser.add_argument("--bench-dir", required=False, type=str)
    run_subparser.add_argument("--repeat", required=False, type=int, default=1)
    run_subparser.add_argument(
        "--no-prepare-binaries",
        action='store_false',
        dest='prepare_binaries',
    )
    run_subparser.add_argument("--tracing-params", type=str, default="trace")

    args = parser.parse_args()

    if args.command == 'run':
        run.run(args)
    else:
        parser.error(f"Unknown command {args.command!r}")


if __name__ == '__main__':
    main()
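Equivalently to the CLI above, the runner can be driven programmatically; a sketch with an argparse-style namespace carrying the fields `run.run` reads (values shown are the parser defaults or illustrative):

```python
import argparse

from performance_metering import run

args = argparse.Namespace(
    command="run",
    path=None,            # JSON database path; defaults to benches/PERFORMANCE.json
    report_path=None,     # text report path; defaults to benches/PERFORMANCE.txt
    host_id=None,
    bench_dir=None,       # defaults to benches/performance_metering
    repeat=1,
    prepare_binaries=True,
    tracing_params="trace",
)
run.run(args)
```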
79 tools/cli/performance_metering/performance_metering/run.py Normal file
@@ -0,0 +1,79 @@
#
# Copyright 2023 Fluence Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Running benches."""

import logging
import os
import subprocess
import typing

from .bench import Bench
from .db import Db
from .helpers import intermediate_temp_file
from .text_report import TextReporter
from .trace_walker import TraceWalker

DEFAULT_TEST_DIR = "benches/performance_metering"
DEFAULT_REPORT_PATH = "benches/PERFORMANCE.txt"

logger = logging.getLogger(__name__)


def _prepare(args):
    """Prepare the environment: build the required tools."""
    if args.prepare_binaries:
        logger.info("Build air-interpreter...")
        subprocess.check_call([
            "marine", "build", "--release", "--features", "marine",
            "--package", "air-interpreter",
        ])
        logger.info("Build air-trace...")
        subprocess.check_call([
            "cargo", "build", "--release", "--package", "air-trace",
        ])


def discover_tests(bench_dir: typing.Optional[str]) -> list[Bench]:
    """Discover bench suite elements."""
    if bench_dir is None:
        bench_dir = DEFAULT_TEST_DIR
    return list(map(
        lambda filename: Bench(os.path.join(bench_dir, filename)),
        sorted(os.listdir(bench_dir))
    ))


def run(args):
    """Run the test suite, saving results to the database."""
    _prepare(args)

    suite = discover_tests(args.bench_dir)
    with Db(args.path, None, host_id=args.host_id) as db:
        for bench in suite:
            raw_stats = bench.run(args.repeat, args.tracing_params)
            walker = TraceWalker()
            walker.process(raw_stats)

            combined_stats = walker.to_json(args.repeat)
            total_time = walker.get_total_time(args.repeat)
            db.record(bench, combined_stats, total_time)

        with (
            intermediate_temp_file(
                args.report_path or DEFAULT_REPORT_PATH) as out
        ):
            report = TextReporter(db.data)
            report.save_text_report(out)
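`discover_tests` simply treats every entry of the bench directory as one bench; a sketch of walking the suite added in this commit (assuming each bench directory contains the `params.json`, `prev_data.json`, `cur_data.json`, and `script.air` files that `Bench` looks for):

```python
from performance_metering.run import discover_tests

for bench in discover_tests("benches/performance_metering"):
    # Prints e.g. Bench('network_explore', _Params('5 peers of network are discovered', {}))
    print(bench)
```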
@@ -0,0 +1,76 @@
#
# Copyright 2023 Fluence Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Human-readable text report generation."""


class TextReporter:
    """A generator for a human-readable text report."""

    data: dict
    indent_step = 2

    def __init__(self, data):
        """Construct a reporter for db data."""
        self.data = data

    def save_text_report(self, file):
        """Save the report to the file."""
        for machine_id, machine in self.data.items():
            _print_indent("Machine {}:".format(machine_id),
                          indent=0, file=file)
            self._save_machine(machine, file=file)

    def _save_machine(self, machine, file):
        indent = self.indent_step
        _print_indent("Platform: {}".format(machine["platform"]),
                      indent=indent, file=file)
        _print_indent("Timestamp: {}".format(machine["datetime"]),
                      indent=indent, file=file)
        _print_indent("AquaVM version: {}".format(machine["version"]),
                      indent=indent, file=file)
        _print_indent("Benches:", indent=indent, file=file)

        nested_indent = indent + self.indent_step
        for bench_name, bench in machine["benches"].items():
            self._save_bench(
                bench_name, bench, indent=nested_indent, file=file)

    def _save_bench(self, bench_name, bench, indent, file):
        _print_indent(
            "{} ({}): {}".format(
                bench_name, bench["total_time"], bench["comment"]),
            indent=indent, file=file)
        for fname, stats in bench["stats"].items():
            self._save_stats(fname, stats, indent + self.indent_step, file)

    def _save_stats(self, fname, stats, indent, file):
        if isinstance(stats, dict):
            duration = stats["duration"]

            _print_indent(
                "{}: {}".format(fname, duration),
                indent=indent,
                file=file)
            for nested_fname, nested_stats in stats["nested"].items():
                self._save_stats(nested_fname, nested_stats,
                                 indent=(indent + self.indent_step), file=file)
        else:
            assert isinstance(stats, str)
            _print_indent("{}: {}".format(fname, stats),
                          indent=indent, file=file)


def _print_indent(line, indent, file):
    print("{:<{indent}}{}".format("", line, indent=indent), file=file)
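For reference, a sketch of feeding `TextReporter` a minimal database dict shaped like the one `Db` stores (the host id is hypothetical; the values are abbreviated from the report above):

```python
import sys

from performance_metering.text_report import TextReporter

data = {
    "example-host-id": {
        "platform": "macOS-13.1-arm64-arm-64bit",
        "datetime": "2023-02-03 12:08:43.454860+00:00",
        "version": "0.35.0",
        "benches": {
            "network_explore": {
                "comment": "N peers of network are discovered",
                "total_time": "80.18ms",
                "stats": {"air::runner::execute_air": "80.18ms"},
            },
        },
    },
}
TextReporter(data).save_text_report(sys.stdout)
```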
@@ -0,0 +1,206 @@
#
# Copyright 2023 Fluence Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Trace stateful processing."""

import datetime
import logging
from itertools import zip_longest
from typing import Optional

from .helpers import format_timedelta, parse_trace_timedelta

logger = logging.getLogger(__name__)


class TraceRecord:
    """Trace record grouped by fully-qualified function name."""

    message: str
    span: str
    target: str
    execution_time: datetime.timedelta
    spans: list
    nested: dict

    def __init__(
            self,
            message: str,
            span: str,
            target: str,
            raw_time: Optional[str],
            spans: list
    ):
        """Create a TraceRecord instance."""
        self.message = message
        self.span = span
        self.target = target
        self.execution_time = parse_trace_timedelta(raw_time)
        self.spans = spans
        self.nested = {}

    def get_span(self):
        """Get the current span."""
        return self.span

    def get_parents(self):
        """Get parent spans."""
        return iter(self.spans)

    def get_func_name(self) -> str:
        """Return the qualified function name."""
        if self.target is None:
            return self.span
        return f"{self.target}::{self.span}"

    def to_json(self, repeat: int) -> dict:
        """Convert the trace to a JSON report."""
        duration = format_timedelta(self.execution_time / repeat)
        if self.nested:
            prefix = _common_prefix(self.nested)
            nested = {
                _split_prefix(fname, prefix): trace_record.to_json(repeat)
                for (fname, trace_record) in self.nested.items()
            }

            result = {
                "common_prefix": "::".join(prefix),
                "duration": duration,
                "nested": nested,
            }
        else:
            result = duration
        return result

    def __repr__(self):
        """Return debug representation."""
        return "{}@{}<span={!r}, spans={!r}, time={}, nested={!r}>".format(
            self.__class__.__name__,
            id(self),
            self.span,
            self.spans,
            self.execution_time,
            self.nested,
        )


def _common_prefix(nested: dict) -> list[str]:
    items = iter(nested.keys())
    prefix = next(items).split("::")[:-1]
    for fname in items:
        fname_split = fname.split("::")[:-1]
        new_prefix = []
        for (old, new) in zip(prefix, fname_split):
            if old == new:
                new_prefix.append(old)
            else:
                break
        prefix = new_prefix
    return prefix


def _split_prefix(fname, prefix):
    fname_prefix = fname.split("::")[len(prefix):]
    logger.debug("split_prefix %r -> %r", fname, fname_prefix)
    return "::".join(fname_prefix)


class _RootStub:
    nested: dict

    def __init__(self, root):
        self.nested = root


class TraceWalker:
    """Trace stateful processing: convert a sequence of trace events
    into a call tree."""

    stack: list
    # Map from fully-qualified function name to a trace record.
    root: dict

    def __init__(self):
        """Create a walker."""
        self.stack = []
        self.root = {}

    def process(self, records):
        """Process all input records, building a call tree in the `root` field."""
        for raw_rec in records:
            logger.debug("raw_rec %r", raw_rec)
            message = raw_rec["fields"]["message"]
            if message in ("enter", "close"):
                span = raw_rec["span"].get("name", "ERROR_missing_span.name")
                target = raw_rec.get("target", None)
                spans = [sp["name"] for sp in raw_rec.get("spans", [])]
                logger.debug("Message: %r", message)
                if message == "close":
                    time_busy = raw_rec["fields"].get("time.busy")
                    rec = self.stack.pop()
                    logger.debug("Popped %r from %r", rec, self.stack)
                    real_rec = self._get_closing_rec(rec)
                    assert rec == real_rec, f"{rec!r} vs {real_rec!r}"
                    rec.execution_time += parse_trace_timedelta(time_busy)
                elif message == "enter":
                    assert span == spans[-1]
                    rec = TraceRecord(message, span, target, None, spans[:-1])
                    self._inject_enter_rec(rec)

    def to_json(self, repeat: int):
        """Convert to JSON."""
        assert not self.stack
        return {
            fname: trace_record.to_json(repeat)
            for (fname, trace_record) in self.root.items()
        }

    def get_total_time(self, repeat: int):
        """Get the total execution time."""
        assert not self.stack
        root_time = sum(
            (node.execution_time for node in self.root.values()),
            start=datetime.timedelta()
        ) / repeat
        return format_timedelta(root_time)

    def _find_parent(self, rec: TraceRecord) -> TraceRecord:
        parent = _RootStub(self.root)

        for (sp1, tr2) in zip_longest(rec.spans, self.stack):
            # Validity check. Should hold for single-threaded app.
            assert tr2 is not None, f"{rec.spans!r} vs {self.stack!r}"
            assert sp1 == tr2.get_span(), f"{rec.spans!r} vs {self.stack!r}"
            parent = parent.nested[tr2.get_func_name()]
        return parent

    def _inject_enter_rec(self, rec: TraceRecord):
        parent = self._find_parent(rec)

        fname = rec.get_func_name()
        if fname not in parent.nested:
            logger.debug("Inserting %r to %r", rec, parent)
            parent.nested[fname] = rec
        else:
            rec = parent.nested[fname]
        logger.debug("Push %r to %r", rec, self.stack)
        self.stack.append(rec)

    def _get_closing_rec(self, rec: TraceRecord):
        parent = self._find_parent(rec)

        fname = rec.get_func_name()
        real_rec = parent.nested[fname]
        return real_rec
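A sketch of the walker on two synthetic `tracing` JSON events (the shapes follow the fields `process` reads; the span name, target, and timing are made up):

```python
from performance_metering.trace_walker import TraceWalker

events = [
    {   # span entered
        "fields": {"message": "enter"},
        "span": {"name": "execute_air"},
        "target": "air::runner",
        "spans": [{"name": "execute_air"}],
    },
    {   # span closed, carrying the busy time reported by tracing
        "fields": {"message": "close", "time.busy": "1.20s"},
        "span": {"name": "execute_air"},
        "target": "air::runner",
        "spans": [{"name": "execute_air"}],
    },
]

walker = TraceWalker()
walker.process(events)
print(walker.to_json(repeat=1))         # {'air::runner::execute_air': '1.20s'}
print(walker.get_total_time(repeat=1))  # 1.20s
```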
35 tools/cli/performance_metering/setup.py Normal file
@@ -0,0 +1,35 @@
#
# Copyright 2023 Fluence Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from setuptools import setup


setup(name='aquavm_performance_metering',
      version='0.1',
      description='An AquaVM Performance metering tool',
      author='Fluence Labs Limited',
      license='Apache-2.0',
      packages=['performance_metering'],
      zip_safe=True,
      install_requires=[
          # Python 3.11 has the standard tomllib, but it is not yet
          # available everywhere.
          'toml',
      ],
      entry_points={
          'console_scripts': [
              'aquavm_performance_metering=performance_metering.main:main',
          ],
      })