#!/usr/bin/env python3
"""Evaluate different emper variants and their io latency"""
import argparse
import copy
import datetime
import fnmatch
import os
import subprocess
import sys
from pathlib import Path
import platform
import typing as T
import yaml
from summarize import collect, summarize, calc_stats, calc_avgs
# Directory containing this script; all other paths are resolved from it.
ROOT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
EMPER_ROOT = ROOT_DIR / 'emper'
# Git revision of the artifact repo ("--dirty" marks local modifications);
# the trailing [:-1] strips the newline from git's output.
# NOTE: this runs `git` at import time and raises if ROOT_DIR is not a repo.
ARTIFACT_DESC = subprocess.check_output(
    'git describe --dirty --always'.split(), cwd=ROOT_DIR, text=True)[:-1]
# Non-EMPER reference targets; one 'emper-<variant>' entry per EMPER build
# variant is appended below.  Each target config holds at least 'cmd' and
# may carry an optional 'env' mapping.
TARGETS = {
    'baseline': {
        'cmd': f'{EMPER_ROOT}/build-vanilla/eval/io_latency --data baseline'
    },
    'iouring': {
        'cmd': f'{EMPER_ROOT}/build-vanilla/eval/io_latency --data iouring'
    },
}
# EMPER build variants to measure.  Each variant maps to an (initially
# empty) target-config override; a 'cmd' key here would take precedence
# over the default command constructed in the loop below.  The builtin
# generic annotation (dict[...]) requires Python >= 3.9.
EMPER_VARIANTS: dict[str, dict[str, str]] = {
    'vanilla': {},
    'no-sleep': {},
    'pipe': {},
    'pipe-no-hint': {},
    'pipe-no-comp': {},
    'io-stealing': {},
    'io-stealing-lockless': {},
    'io-stealing-pipe': {},
    'io-stealing-lockless-pipe': {},
    'io-stealing-pipe-no-comp': {},
    'io-stealing-lockless-pipe-no-comp': {},
    'single-uring': {},
}
# Derive one measurement target per EMPER build variant.
for variant, _desc in EMPER_VARIANTS.items():
    target_conf = {**_desc}
    # Fall back to the binary in the variant's own build directory when
    # the variant does not configure an explicit command.
    # NOTE(review): the default command ends with a bare '--data' (no
    # value), unlike the baseline/iouring targets above — confirm that
    # io_latency accepts '--data' without an argument.
    target_conf.setdefault(
        'cmd', f'{EMPER_ROOT}/build-{variant}/eval/io_latency --data')
    TARGETS[f'emper-{variant}'] = target_conf
def filter_targets(include, exclude, targets=None):
    """Apply an include and an exclude filter to a target mapping.

    The filters are lists of POSIX glob patterns (see :mod:`fnmatch`).

    Args:
        include: glob patterns; when non-empty, only targets matching at
            least one pattern are kept (in pattern order).
        exclude: glob patterns; targets matching any pattern are dropped.
        targets: mapping of target name -> target config to filter;
            defaults to the module-level TARGETS.

    Returns:
        A new dict with the surviving targets; the input is not mutated.
    """
    targets = copy.copy(TARGETS if targets is None else targets)
    if include:
        # Insert matches pattern by pattern so the include order is kept.
        kept = {}
        for pattern in include:
            for name in fnmatch.filter(targets.keys(), pattern):
                kept[name] = targets[name]
        targets = kept
    if exclude:
        # Collect every excluded name first, then filter in one pass.
        dropped = set()
        for pattern in exclude:
            dropped.update(fnmatch.filter(targets.keys(), pattern))
        targets = {
            name: conf
            for name, conf in targets.items() if name not in dropped
        }
    return targets
# All measurement results are collected below this directory.
RESULTS_ROOT = ROOT_DIR / 'results'
def prepare_env(update_env: T.MutableMapping) -> T.Dict:
    """Return a copy of os.environ overlaid with *update_env*.

    The real process environment is left untouched; later keys in
    *update_env* win over inherited ones.
    """
    return {**os.environ, **update_env}
# perf(1) binary used for profiling and performance-counter collection.
PERF_EXE = 'perf'
# Default perf event selection: '-dd' requests a more detailed event set.
PERF_EVENT_SELECTION = '-dd'
def main(args):
    """Run the evaluation for every configured target.

    For each entry in the module-level TARGETS mapping the configured
    command is executed — optionally wrapped in perf for flamegraph
    recording, profile recording, or counter collection — with stdout
    and stderr captured into files below RESULT_DIR.

    Args:
        args: parsed argparse namespace; uses .flamegraph, .perf_stats,
            .perf_record, .verbose and .worker_count.

    Raises:
        subprocess.CalledProcessError: if a target command exits non-zero.
    """
    for target, target_conf in TARGETS.items():
        cmd = target_conf['cmd']
        if args.flamegraph:
            perf_out = RESULT_DIR / f'{target}.perf.data'
            # DWARF call graphs give usable stacks for the flamegraph.
            cmd = f'{PERF_EXE} record --call-graph dwarf -o {perf_out} {cmd}'
        elif args.perf_stats or args.perf_record:
            perf_event_selection = ','.join(
                args.perf_stats) if args.perf_stats else PERF_EVENT_SELECTION
            if args.perf_record:
                perf_out = RESULT_DIR / f'{target}.perf.data'
                cmd = f'{PERF_EXE} record -g {perf_event_selection} -o {perf_out} {cmd}'
            else:
                # '-x,' selects CSV output for easy post-processing.
                perf_out = RESULT_DIR / f'{target}.perf.stats'
                cmd = f'{PERF_EXE} stat {perf_event_selection} -x, -o {perf_out} {cmd}'

        # \u001b[K clears to end of line; \r returns to column 0 so the
        # next progress message overwrites this one.
        print(f"measuring {target} ...\u001b[K\r", end='')

        stats_file = RESULT_DIR / f'{target}.stats'
        if args.verbose:
            print(f'Measure {target} using: {cmd}')

        # Copy the per-target env so we never mutate the shared TARGETS
        # config, and stringify values: a subprocess environment must
        # contain only strings, not Path objects.
        target_env = dict(target_conf.get('env', {}))
        target_env['EMPER_STATS_FILE'] = str(stats_file)
        if args.worker_count:
            target_env['EMPER_WORKER_COUNT'] = str(args.worker_count)

        out_path = RESULT_DIR / f'{target}.out'
        err_path = RESULT_DIR / f'{target}.err'
        with open(out_path, 'w', encoding='utf-8') as out_file, open(
                err_path, 'w', encoding='utf-8') as err_file:
            subprocess.run(cmd.split(),
                           check=True,
                           stdout=out_file,
                           stderr=err_file,
                           env=prepare_env(target_env))

        # Remove empty stderr captures to keep the result dir tidy.
        if not os.path.getsize(err_path):
            os.remove(err_path)
def generate_flamegraphs(result_dir):
    """Generate flamegraphs from recorded perf data files.

    Runs tools/generate-flamegraph.sh once for every '*.data' file in
    *result_dir*.

    Args:
        result_dir: pathlib.Path of the directory holding perf output.

    Raises:
        subprocess.CalledProcessError: if the flamegraph script fails.
    """
    for path in result_dir.glob('*.data'):
        # \u001b[K clears the rest of the line after the carriage return.
        print(f'\rGenerating flamegraph from {path.name} ...\u001b[K',
              end='')
        # Pass an argv list instead of splitting a formatted string, so
        # paths containing whitespace are handled correctly.
        script = ROOT_DIR / 'tools' / 'generate-flamegraph.sh'
        subprocess.run([str(script), str(path)], check=True)
    print()
def write_desc():
    """Write a YAML description of the experiment to RESULT_DIR/desc.yml.

    Records the exact command line used to start the evaluation, the
    client machine's uname, and the list of measured targets.
    """
    desc_file_path = RESULT_DIR / 'desc.yml'
    with open(desc_file_path, 'w', encoding='utf-8') as desc_file:
        print(f'cmd: {" ".join(sys.argv)}', file=desc_file)
        uname = os.uname()
        print(
            (f'uname_client: {uname.sysname} {uname.nodename} {uname.release} '
             f'{uname.version} {uname.machine}'),
            file=desc_file)
        # Emit the targets as a proper YAML sequence; emitting repeated
        # bare 'name:' lines would create duplicate mapping keys, which
        # is invalid YAML and unparseable by downstream tooling.
        print('targets:', file=desc_file)
        for target in TARGETS:
            print(f'  - name: {target}', file=desc_file)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-v',
                        '--verbose',
                        help='show build output',
                        action='store_true')
    parser.add_argument("-w",
                        "--worker-count",
                        help="number of EMPER worker threads",
                        type=int)
    parser.add_argument("-i",
                        "--implementations",
                        help="implementations to plot",
                        nargs='+')
    parser.add_argument("-ix",
                        "--exclude-implementations",
                        help="implementations to exclude",
                        nargs='+')
    parser.add_argument('--desc-stats',
                        help='file to store descriptive statistics',
                        type=str)
    parser.add_argument('--perf-stats',
                        help='use perf to collect performance counter stats',
                        nargs='*')
    parser.add_argument('--perf-record',
                        help='use perf to record a profile',
                        action='store_true')
    parser.add_argument('--flamegraph',
                        help='generate flamegraphs',
                        action='store_true')
    _args = parser.parse_args()
    # Unique per-run result directory: <git-desc>-<hostname>/<timestamp>.
    # makedirs without exist_ok intentionally fails on a (second-level)
    # timestamp collision rather than mixing results of two runs.
    RESULT_DIR = (RESULTS_ROOT / f'{ARTIFACT_DESC}-{platform.uname().node}' /
                  datetime.datetime.now().strftime("%Y-%m-%dT%H_%M_%S"))
    os.makedirs(RESULT_DIR)
    print(f'Save results at: {RESULT_DIR}')
    # Rebind the module-level TARGETS before write_desc()/main() read it.
    TARGETS = filter_targets(_args.implementations,
                             _args.exclude_implementations)
    write_desc()
    main(_args)
    if _args.flamegraph:
        print()
        generate_flamegraphs(RESULT_DIR)
        # Flamegraph runs are profiling-only: skip latency summarizing.
        sys.exit(0)
    # Collect the per-target latency data written by the benchmarks.
    _data = collect(result_dir=RESULT_DIR)
    if _data is None:
        print(f'Error: no data was collected from {RESULT_DIR}',
              file=sys.stderr)
        sys.exit(1)
    if _args.desc_stats:
        # Dump full descriptive statistics as YAML to the requested file.
        stats = calc_stats(_data)
        with open(_args.desc_stats, 'w', encoding='utf-8') as desc_stats_file:
            print(yaml.safe_dump(stats), file=desc_stats_file)
    else:
        # Otherwise print a human-readable summary of the averages;
        # summarize()'s return value becomes the process exit code.
        print('\n### Summary ###')
        avgs = calc_avgs(_data)
        sys.exit(summarize(avgs=avgs))