i4 / manycore / emper-io-eval

Commit ce03d9b7
authored May 30, 2022 by Florian Fischer

    add dataref export

parent c3aa298a
2 changed files
parse_results.py
@@ -6,6 +6,7 @@ import csv
 import fnmatch
+import io
 import re
 import sys
 from pathlib import Path
 from typing import Dict, Iterable, List, Mapping, Optional, Sequence, Union
@@ -100,7 +101,7 @@ def collect_results(result_dir: Union[Path, str],
                exp_data[k] = to_number(configparser['global'][k])
            except KeyError:
                error = True
-               print(f'ERROR in {data_path}')
+               print(f'ERROR in {data_path}', file=sys.stderr)
                break

    if error:
@@ -182,10 +183,11 @@ def calculate_stats(data: EvaluationResults, warn=False) -> EvaluationStats:
            if warn:
                precision = cur_stats['std'] / cur_stats['mean']
                if precision > 0.05:
-                   print('Warning: imprecise data impl:', end='')
-                   print(f' {impl}, c: {cons}, k: {key} p: {precision * 100:.5}%')
+                   print(('Warning: imprecise data impl:'
+                          f' {impl}, c: {cons}, k: {key} p: {precision * 100:.5}%'),
+                         file=sys.stderr)

            cons_stats[key] = cur_stats
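
Both hunks above also move diagnostics from stdout to stderr, presumably so that stdout stays machine-readable once stats are dumped for dataref. A minimal sketch of that separation (the numbers are invented; only the 5 % threshold and the message shape come from calculate_stats):

    import sys

    mean, std = 52137.4, 3120.9          # invented example values
    precision = std / mean
    if precision > 0.05:                 # same relative-precision check as calculate_stats
        print(f'Warning: imprecise data p: {precision * 100:.5}%', file=sys.stderr)
    print(r'\drefset{example/100/iops/mean}{52137.4}')  # only this line reaches stdout

Redirecting stdout (for example into a .tex file) then captures only the \drefset line, while the warning still shows up on the terminal.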
@@ -218,6 +220,23 @@ def print_stats(stats: EvaluationStats, variables: Optional[Iterable]):
        print_stats_for_variable(variable, cons_stats[variable])


+def print_dataref(stats: EvaluationStats, variables=None, dstats=None):
+    for impl, impl_stats in stats.items():
+        for connections, conn_stats in impl_stats.items():
+            keys = variables or conn_stats.keys()
+            for key in keys:
+                desc_stats = conn_stats[key]
+                dstats = dstats or desc_stats.keys()
+                for stat in dstats:
+                    val = desc_stats[stat]
+                    if stat == 'outliers':
+                        continue
+                    print(rf'\drefset{{{impl}/{connections}/{key}/{stat}}}{{{val}}}')
+
+
 def main():
     parser = argparse.ArgumentParser()
     parser.add_argument("result_dir")
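
For illustration, a standalone sketch of what the new print_dataref emits; the nested mapping and the numbers are invented, only the impl/connections/key/stat key layout and the \drefset macro come from the function above:

    # Invented mapping shaped like EvaluationStats: impl -> connections -> key -> stat.
    stats = {'emper': {100: {'iops': {'mean': 52137.4, 'std': 312.9}}}}

    for impl, impl_stats in stats.items():
        for connections, conn_stats in impl_stats.items():
            for key, desc_stats in conn_stats.items():
                for stat, val in desc_stats.items():
                    print(rf'\drefset{{{impl}/{connections}/{key}/{stat}}}{{{val}}}')

    # Output:
    # \drefset{emper/100/iops/mean}{52137.4}
    # \drefset{emper/100/iops/std}{312.9}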
@@ -242,6 +261,14 @@ def main():
                         nargs='*',
                         default=['iops'])
+    parser.add_argument("--dataref",
+                        help="dump stats as dataref",
+                        action='store_true')
+    parser.add_argument("--desc-stats",
+                        help="descriptive statistics to print",
+                        nargs='*')

     args = parser.parse_args()

     data = collect_results(args.result_dir,
@@ -251,6 +278,12 @@ def main():
     stats = calculate_stats(data, args.warn)

+    if args.dataref:
+        print_dataref(stats,
+                      variables=args.print_stats,
+                      dstats=args.desc_stats)
+        return
+
     if args.print_stats is not None:
         print_stats(stats, variables=args.print_stats)
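
Taken together, the new options make a dataref dump a one-step operation. A hypothetical invocation (result directory and statistic names are only illustrative):

    python3 parse_results.py results/ --dataref --desc-stats mean std > stats.tex

--dataref makes main() call print_dataref and return before the usual print_stats output, --desc-stats restricts which descriptive statistics are exported, and the emitted \drefset lines are presumably meant to be read by a LaTeX document that loads the dataref package, where \dref{<impl>/<connections>/<key>/<stat>} expands to the stored value.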
plot_tail_latency.py
@@ -179,7 +179,7 @@ def plot_tail_latency_linear(latency_files, latency, out=None):
     pyplot_show_or_save(out=out)


-def summarize(latency_files, latency):
+def summarize(latency_files, latency, dataref=False):
     bar_data = get_bar_data(latency_files,
                             latency=latency,
                             include_raw_data=True)
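
The new dataref parameter defaults to False, so existing callers of summarize keep the current plain-text summary unless they explicitly opt in.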
@@ -197,28 +197,50 @@ def summarize(latency_files, latency):
             data[99.99],
         ]) / 1000000

-        name = impl.replace('-io', '').replace('emper-', '').replace('-0us-1ms', '')
+        name = impl.replace('-io', '').replace('emper-', '')
         data_min = min(latencies)
         data_max = max(latencies)

-        print(f'{name}-min: {data_min:.2f}')
-        print(f'{name}-max: {data_max:.2f}')
-        print(f'{name}-mean: {np.mean(latencies):.2f}')
-        print(f'{name}-median: {median:.2f}')
-        print(f'{name}-p95: {p95:.2f}')
-        print(f'{name}-p99: {p99:.2f}')
-        print(f'{name}-p99.9: {p99_9:.2f}')
-        print(f'{name}-p99.99: {p99_99:.2f}')
-
-        bin_edge_candidates = [data_min, median, 100, 500, 1000, 1750, 2500, 5000,
-                               7500, 10000, 13000, 13500, 14000, data_max]
-        bin_edges = [c for c in bin_edge_candidates if c <= data_max]
-        hist, bin_edges = np.histogram(latencies, bin_edges)
-        print(f'{name}-hist: {list(hist)}')
-        print(f'{name}-hist-bin-edges: {[f"{e:.2f}" for e in bin_edges]}')
+        if not dataref:
+            print(f'{name}-min: {data_min:.2f}')
+            print(f'{name}-max: {data_max:.2f}')
+            print(f'{name}-mean: {np.mean(latencies):.2f}')
+            print(f'{name}-median: {median:.2f}')
+            print(f'{name}-p95: {p95:.2f}')
+            print(f'{name}-p99: {p99:.2f}')
+            print(f'{name}-p99.9: {p99_9:.2f}')
+            print(f'{name}-p99.99: {p99_99:.2f}')
+
+            bin_edge_candidates = [data_min, median, 100, 500, 1000, 1750, 2500, 5000,
+                                   7500, 10000, 13000, 13500, 14000, data_max]
+            bin_edges = [c for c in bin_edge_candidates if c <= data_max]
+            hist, bin_edges = np.histogram(latencies, bin_edges)
+            print(f'{name}-hist: {list(hist)}')
+            print(f'{name}-hist-bin-edges: {[f"{e:.2f}" for e in bin_edges]}')
+        else:
+            print(rf'\drefset[unit=\unit{{\ms}}]{{{name}/latency/min}}{{{data_min}}}')
+            print(rf'\drefset[unit=\unit{{\ms}}]{{{name}/latency/max}}{{{data_max}}}')
+            print(rf'\drefset[unit=\unit{{\ms}}]{{{name}/latency/median}}{{{median}}}')
+            print(rf'\drefset[unit=\unit{{\ms}}]{{{name}/latency/p95}}{{{p95}}}')
+            print(rf'\drefset[unit=\unit{{\ms}}]{{{name}/latency/p99}}{{{p99}}}')
+            print(rf'\drefset[unit=\unit{{\ms}}]{{{name}/latency/p99.9}}{{{p99_9}}}')
+            print(rf'\drefset[unit=\unit{{\ms}}]{{{name}/latency/p99.99}}{{{p99_99}}}')


 def main():
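
A sketch of the shape of one exported line in dataref mode (implementation name and value are invented; the [unit=\unit{\ms}] option and the key layout come from the code above):

    name, p99 = 'example', 1.84
    print(rf'\drefset[unit=\unit{{\ms}}]{{{name}/latency/p99}}{{{p99}}}')
    # -> \drefset[unit=\unit{\ms}]{example/latency/p99}{1.84}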
@@ -232,12 +254,13 @@ def main():
                         nargs='+',
                         default=['line'])
     parser.add_argument("--summarize", action='store_true')
+    parser.add_argument("--dataref", action='store_true')
     parser.add_argument("--out", type=str, nargs='*', help='Output files')

     args = parser.parse_args()

     if args.summarize:
-        summarize(args.data_files, args.latency)
+        summarize(args.data_files, args.latency, dataref=args.dataref)
         sys.exit(0)

     plots = {
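
With this change, a hypothetical run such as "python3 plot_tail_latency.py <latency files> --summarize --dataref" (file arguments left as placeholders) prints the \drefset lines instead of the human-readable summary and then exits, mirroring the --dataref flag added to parse_results.py.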