diff --git a/Makefile b/Makefile index 5d838dc68f8a9f37c26ffa208acc09d8238484d6..5c375560d1c6ce17072fb9f16fa1ffb3f9d21176 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ CXX = g++ WARNFLAGS = -Wall -Wextra COMMONFLAGS = -fno-builtin -fPIC -DPIC -pthread OPTFLAGS = -O3 -DNDEBUG -#OPTFLAGS = -O0 -g3 +# OPTFLAGS = -O0 -g3 CXXFLAGS = -std=c++11 -I. $(OPTFLAGS) $(WARNFLAGS) $(COMMONFLAGS) -fno-exceptions CFLAGS = -I. $(OPTFLAGS) $(WARNFLAGS) $(COMMONFLAGS) @@ -48,6 +48,14 @@ $(OBJDIR)/trace_run-glibc-notc: $(OBJDIR)/trace_run $(MAKEFILE_LIST) patchelf --set-interpreter $(GLIBC_NOTC)/ld-linux-x86-64.so.2 $@ patchelf --set-rpath $(GLIBC_NOTC) $@ +$(OBJDIR)/larson: $(OBJDIR)/larson.o + $(CXX) -pthread -o $@ $^ + +$(OBJDIR)/larson-glibc-notc: $(OBJDIR)/larson + cp $< $@ + patchelf --set-interpreter $(GLIBC_NOTC)/ld-linux-x86-64.so.2 $@ + patchelf --set-rpath $(GLIBC_NOTC) $@ + $(OBJDIR)/cache-thrash: $(OBJDIR)/cache-thrash.o $(CXX) -pthread -o $@ $^ diff --git a/bench.py b/bench.py index ee1c4122b93b96b9778f84de09598d614fbbe2b1..237afa3b99adc759f7c6eeb5340fa9a9a790422d 100755 --- a/bench.py +++ b/bench.py @@ -10,6 +10,7 @@ from loop import loop # from bench_conprod import conprod from mysql import mysql from dj_trace import dj_trace +from larson import larson parser = argparse.ArgumentParser(description="benchmark memory allocators") parser.add_argument("-s", "--save", help="save benchmark results to disk", action='store_true') @@ -22,7 +23,7 @@ parser.add_argument("-sd", "--summarydir", help="directory where all plots and t parser.add_argument("-a", "--analyse", help="collect allocation sizes", action='store_true') -benchmarks = [loop, mysql, falsesharing, dj_trace] +benchmarks = [loop, mysql, falsesharing, dj_trace, larson] def main(): args = parser.parse_args() diff --git a/larson.py b/larson.py new file mode 100644 index 0000000000000000000000000000000000000000..5ba93fb2b20abedfa867dd3ca33afcaa565a9d30 --- /dev/null +++ b/larson.py @@ -0,0 +1,69 @@ +import csv 
+import pickle
+import matplotlib.pyplot as plt
+import multiprocessing
+import numpy as np
+import os
+import re
+import subprocess
+
+from benchmark import Benchmark
+
+throughput_re = re.compile(r"^Throughput =\s*(?P<throughput>\d+) operations per second\.$")
+
+class Benchmark_Larson( Benchmark ):
+    def __init__(self):
+        self.name = "larson"
+        self.descrition = """This benchmark is courtesy of Paul Larson at Microsoft
+                             Research. It simulates a server: each thread allocates
+                             and deallocates objects, and then transfers some objects
+                             (randomly selected) to other threads to be freed."""
+
+        self.cmd = "build/larson{binary_suffix} 1 8 {maxsize} 1000 10000 1 {threads}"
+
+        self.args = {
+            "maxsize" : [8, 32, 64, 128, 256, 512, 1024],
+            "threads" : range(1, multiprocessing.cpu_count() * 2 + 1)
+        }
+
+        self.requirements = ["build/larson"]
+        super().__init__()
+
+    def process_stdout(self, result, stdout, verbose):
+        for l in stdout.splitlines():
+            res = throughput_re.match(l)
+            if res:
+                result["throughput"] = int(res.group("throughput"))
+                return
+        print(stdout)
+        print("no match")
+
+    def summary(self, sd=None):
+        # Speedup thrash
+        args = self.results["args"]
+        nthreads = args["threads"]
+        targets = self.results["targets"]
+
+        sd = sd or ""
+
+        for arg in args:
+            loose_arg = [a for a in args if a != arg][0]
+            for arg_value in args[arg]:
+                for target in targets:
+                    y_vals = []
+                    for perm in self.iterate_args_fixed({arg : arg_value}, args=args):
+                        d = [m["throughput"] for m in self.results[target][perm]]
+                        y_vals.append(np.mean(d))
+                    x_vals = list(range(1, len(y_vals) + 1))
+                    plt.plot(x_vals, y_vals, marker='.', linestyle='-',
+                             label=target, color=targets[target]["color"])
+                plt.legend()
+                plt.xticks(x_vals, args[loose_arg])
+                plt.xlabel(loose_arg)
+                plt.ylabel("OPS/s")
+                plt.title("Larson: " + arg + " " + str(arg_value))
+                plt.savefig(os.path.join(sd, ".".join([self.name, arg, str(arg_value), "png"])))
+                plt.clf()
+
+
+larson = Benchmark_Larson()