diff --git a/src/benchmarks/blowup.py b/src/benchmarks/blowup.py
index d55a0d7ff6e55b5634965eeb04c0c2d2923e5edc..aca9293ea4f8fa757534e8475206ae198bf07427 100644
--- a/src/benchmarks/blowup.py
+++ b/src/benchmarks/blowup.py
@@ -14,7 +14,6 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with allocbench.  If not, see <http://www.gnu.org/licenses/>.
-
 """Definition of the blowup micro benchmark"""
 
 from src.benchmark import Benchmark
@@ -45,8 +44,15 @@ class BenchmarkBlowup(Benchmark):
         allocators["Ideal-RSS"] = {"color": "xkcd:gold"}
         self.results["stats"]["Ideal-RSS"] = {}
         for perm in self.iterate_args(args=self.results["args"]):
-            self.results["stats"]["Ideal-RSS"][perm] = {"mean": {"VmHWM": 1024 * 100}, 
-                                                        "std" : {"VmHWM": 0}}
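+            # synthetic baseline: an ideal allocator peaks at 100 MiB (VmHWM stats are in kB)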
+            self.results["stats"]["Ideal-RSS"][perm] = {
+                "mean": {
+                    "VmHWM": 1024 * 100
+                },
+                "std": {
+                    "VmHWM": 0
+                }
+            }
 
         self.barplot_single_arg("{VmHWM}/1000",
                                 ylabel='"VmHWM in MB"',
diff --git a/src/benchmarks/cfrac.py b/src/benchmarks/cfrac.py
index e5940558c024b2fe8308aaa1f672c17a8161e4af..dfd87d69cc29162a3e7ae787d0d401a23b7669e5 100644
--- a/src/benchmarks/cfrac.py
+++ b/src/benchmarks/cfrac.py
@@ -14,7 +14,6 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with allocbench.  If not, see <http://www.gnu.org/licenses/>.
-
 """cfrac is a single threaded implementation of the continued fraction factorization algorithm,
 described by Zorn and Grunwald in their paper "Empirical Measurements of Six Allocation-intensive C Programs" in 1992.
 
@@ -61,6 +60,7 @@ API function as well as memory placement strategies with good data locality.
 
 from src.benchmark import Benchmark
 
+
 class BenchmarkCfrac(Benchmark):
     """Definition of the cfrac benchmark"""
     def __init__(self):
@@ -81,11 +81,12 @@ class BenchmarkCfrac(Benchmark):
                                 filepostfix="time")
 
         # L1 cache misses
-        self.barplot_single_arg("({L1-dcache-load-misses}/{L1-dcache-loads})*100",
-                                ylabel='"L1 misses in %"',
-                                title='"Cfrac l1 cache misses"',
-                                filepostfix="l1misses",
-                                yerr=False)
+        self.barplot_single_arg(
+            "({L1-dcache-load-misses}/{L1-dcache-loads})*100",
+            ylabel='"L1 misses in %"',
+            title='"Cfrac l1 cache misses"',
+            filepostfix="l1misses",
+            yerr=False)
 
         # Memusage
         self.barplot_single_arg("{VmHWM}",
@@ -93,12 +94,15 @@ class BenchmarkCfrac(Benchmark):
                                 title='"Cfrac VmHWM"',
                                 filepostfix="vmhwm")
 
-        self.write_tex_table([{"label": "Runtime [ms]",
-                               "expression": "{task-clock}",
-                               "sort": "<"},
-                              {"label": "Memusage [KB]",
-                               "expression": "{VmHWM}",
-                               "sort": "<"}],
+        self.write_tex_table([{
+            "label": "Runtime [ms]",
+            "expression": "{task-clock}",
+            "sort": "<"
+        }, {
+            "label": "Memusage [KB]",
+            "expression": "{VmHWM}",
+            "sort": "<"
+        }],
                              filepostfix="table")
 
         self.export_stats_to_dataref("task-clock")
diff --git a/src/benchmarks/espresso.py b/src/benchmarks/espresso.py
index bd94b29505a4797201e814ce765c4ded82c6f0b4..2f8b8bfe255c05d62901ff9fc3d9230800b30a2c 100644
--- a/src/benchmarks/espresso.py
+++ b/src/benchmarks/espresso.py
@@ -14,7 +14,6 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with allocbench.  If not, see <http://www.gnu.org/licenses/>.
-
 """espresso is a single threaded programmable logic array analyzer, described by Zorn and Grunwald
 their paper "Empirical Measurements of Six Allocation-intensive C Programs" in 1992.
 
@@ -61,14 +60,19 @@ import os
 from src.benchmark import Benchmark
 import src.globalvars
 
+
 class BenchmarkEspresso(Benchmark):
     """Definition of the espresso benchmark for allocbench"""
     def __init__(self):
         name = "espresso"
 
         self.cmd = "espresso{binary_suffix} {file}"
-        self.args = {"file": [os.path.join(src.globalvars.benchsrcdir, name,
-                                           "largest.espresso")]}
+        self.args = {
+            "file": [
+                os.path.join(src.globalvars.benchsrcdir, name,
+                             "largest.espresso")
+            ]
+        }
 
         self.requirements = ["espresso"]
         super().__init__(name)
@@ -81,11 +85,12 @@ class BenchmarkEspresso(Benchmark):
                                 filepostfix="time")
 
         # L1 cache misses
-        self.barplot_single_arg("({L1-dcache-load-misses}/{L1-dcache-loads})*100",
-                                ylabel='"L1 misses in %"',
-                                title='"Espresso l1 cache misses"',
-                                filepostfix="l1misses",
-                                yerr=False)
+        self.barplot_single_arg(
+            "({L1-dcache-load-misses}/{L1-dcache-loads})*100",
+            ylabel='"L1 misses in %"',
+            title='"Espresso l1 cache misses"',
+            filepostfix="l1misses",
+            yerr=False)
 
         # Memusage
         self.barplot_single_arg("{VmHWM}",
@@ -93,12 +98,15 @@ class BenchmarkEspresso(Benchmark):
                                 title='"Espresso VmHWM"',
                                 filepostfix="vmhwm")
 
-        self.write_tex_table([{"label": "Runtime [ms]",
-                               "expression": "{task-clock}",
-                               "sort": "<"},
-                              {"label": "Memusage [KB]",
-                               "expression": "{VmHWM}",
-                               "sort": "<"}],
+        self.write_tex_table([{
+            "label": "Runtime [ms]",
+            "expression": "{task-clock}",
+            "sort": "<"
+        }, {
+            "label": "Memusage [KB]",
+            "expression": "{VmHWM}",
+            "sort": "<"
+        }],
                              filepostfix="table")
 
         self.export_stats_to_dataref("task-clock")
diff --git a/src/benchmarks/falsesharing.py b/src/benchmarks/falsesharing.py
index 671dbc8942beee9f49a90ec238d13ff2276d5425..530ca99df3ec9a4527b9c92558c177ca2d7d4f99 100644
--- a/src/benchmarks/falsesharing.py
+++ b/src/benchmarks/falsesharing.py
@@ -14,7 +14,6 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with allocbench.  If not, see <http://www.gnu.org/licenses/>.
-
 """Definition of the falsesahring benchmark"""
 
 import re
@@ -25,7 +24,6 @@ import numpy as np
 from src.benchmark import Benchmark
 from src.globalvars import summary_file_ext
 
-
 TIME_RE = re.compile("^Time elapsed = (?P<time>\\d*\\.\\d*) seconds.$")
 
 
@@ -36,14 +34,16 @@ class BenchmarkFalsesharing(Benchmark):
     times. If the allocated objects are on the same cache line the writes
     will be expensive because of cache thrashing.
     """
-
     def __init__(self):
         name = "falsesharing"
 
         self.cmd = "cache-{bench}{binary_suffix} {threads} 100 8 10000000"
 
-        self.args = {"bench": ["thrash", "scratch"],
-                     "threads": Benchmark.scale_threads_for_cpus(1)}
+        self.args = {
+            "bench": ["thrash", "scratch"],
+            "threads": Benchmark.scale_threads_for_cpus(1)
+        }
 
         self.requirements = ["cache-thrash", "cache-scratch"]
         super().__init__(name)
@@ -62,13 +61,20 @@
             for allocator in allocators:
 
                 sequential_perm = self.Perm(bench=bench, threads=1)
-                for perm in self.iterate_args_fixed({"bench": bench}, args=args):
+                for perm in self.iterate_args_fixed({"bench": bench},
+                                                    args=args):
                     speedup = []
                     l1chache_misses = []
                     for i, measure in enumerate(self.results[allocator][perm]):
-                        sequential_time =  float(self.results[allocator][sequential_perm][i]["time"])
-                        measure["speedup"] = sequential_time / float(measure["time"])
-                        measure["l1chache_misses"] = eval("({L1-dcache-load-misses}/{L1-dcache-loads})*100".format(**measure))
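+                        # speedup relative to the single-threaded run of the same bench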
+                        sequential_time = float(self.results[allocator]
+                                                [sequential_perm][i]["time"])
+                        measure["speedup"] = sequential_time / float(
+                            measure["time"])
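+                        # fill the perf counters into the expression and evaluate the L1 miss rate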
+                        measure["l1chache_misses"] = eval(
+                            "({L1-dcache-load-misses}/{L1-dcache-loads})*100".
+                            format(**measure))
 
         # delete and recalculate stats
         del self.results["stats"]
@@ -81,23 +85,27 @@ class BenchmarkFalsesharing(Benchmark):
                             autoticks=False,
                             fixed=["bench"])
 
-        self.plot_fixed_arg("{l1chache_misses}",
-                            ylabel="'l1 cache misses in %'",
-                            title="'cache misses: ' + arg + ' ' + str(arg_value)",
-                            filepostfix="l1-misses",
-                            autoticks=False,
-                            fixed=["bench"])
-
-        self.plot_fixed_arg("({LLC-load-misses}/{LLC-loads})*100",
-                            ylabel="'llc cache misses in %'",
-                            title="'LLC misses: ' + arg + ' ' + str(arg_value)",
-                            filepostfix="llc-misses",
-                            autoticks=False,
-                            fixed=["bench"])
-
-        self.write_tex_table([{"label": "Speedup",
-                               "expression": "{speedup}",
-                               "sort":">"}],
+        self.plot_fixed_arg(
+            "{l1chache_misses}",
+            ylabel="'l1 cache misses in %'",
+            title="'cache misses: ' + arg + ' ' + str(arg_value)",
+            filepostfix="l1-misses",
+            autoticks=False,
+            fixed=["bench"])
+
+        self.plot_fixed_arg(
+            "({LLC-load-misses}/{LLC-loads})*100",
+            ylabel="'llc cache misses in %'",
+            title="'LLC misses: ' + arg + ' ' + str(arg_value)",
+            filepostfix="llc-misses",
+            autoticks=False,
+            fixed=["bench"])
+
+        self.write_tex_table([{
+            "label": "Speedup",
+            "expression": "{speedup}",
+            "sort": ">"
+        }],
                              filepostfix="speedup.table")
 
         self.export_stats_to_csv("speedup", "time")
diff --git a/src/benchmarks/fd.py b/src/benchmarks/fd.py
index 910b267219ff5bbaacae37707caaa3c6a757f74b..a341f71169d52ee78177c60d6a6da4afd7d75083 100644
--- a/src/benchmarks/fd.py
+++ b/src/benchmarks/fd.py
@@ -14,7 +14,6 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with allocbench.  If not, see <http://www.gnu.org/licenses/>.
-
 """Definition of the fd benchmark"""
 
 import os
@@ -31,17 +30,19 @@ from src.util import print_info
 class BenchmarkFd(Benchmark):
     """fd benchmark
     """
-
     def __init__(self):
         name = "fd"
         super().__init__(name)
-        
+
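+        # find all C files (-e c) whose names contain a digit, including hidden (-H) and ignored (-I) ones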
         self.cmd = "fd -HI -e c '.*[0-9].*' {linux_files}"
 
     def prepare(self):
         super().prepare()
 
-        linux = GitArtifact("linux", "git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git")
+        linux = GitArtifact(
+            "linux",
+            "git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git")
         linux_version = "v5.3"
         self.linux_files = linux.provide(linux_version)
 
@@ -53,18 +53,19 @@ class BenchmarkFd(Benchmark):
         fd_url = ("https://github.com/sharkdp/fd/releases/latest/download/"
                   f"fd-{fd_version}-x86_64-unknown-linux-gnu.tar.gz")
 
-        fd = ArchiveArtifact("fd", fd_url, "tar", "a5d8e7c8484449aa324a46abfdfaf026d7de77ee")
+        fd = ArchiveArtifact("fd", fd_url, "tar",
+                             "a5d8e7c8484449aa324a46abfdfaf026d7de77ee")
 
         fd_dir = os.path.join(self.build_dir, "fd_sources")
         fd.provide(fd_dir)
 
         # create symlinks
         for exe in ["fd"]:
-            src = os.path.join(fd_dir, f"fd-{fd_version}-x86_64-unknown-linux-gnu", exe)
+            src = os.path.join(fd_dir,
+                               f"fd-{fd_version}-x86_64-unknown-linux-gnu",
+                               exe)
             dest = os.path.join(self.build_dir, exe)
             os.link(src, dest)
-        
-
 
     def summary(self):
         self.barplot_single_arg("{task-clock}",
diff --git a/src/benchmarks/larson.py b/src/benchmarks/larson.py
index 662728877052395637992d7dc41ebc85852b48e0..6e55fbef7a710edf740908502167d07363de6d78 100644
--- a/src/benchmarks/larson.py
+++ b/src/benchmarks/larson.py
@@ -14,7 +14,6 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with allocbench.  If not, see <http://www.gnu.org/licenses/>.
-
 """Larson server benchmark
 
-This benchmark was build by Paul Larson at Microsoft Research. It
+This benchmark was built by Paul Larson at Microsoft Research. It
@@ -50,12 +49,12 @@ import re
 
 from src.benchmark import Benchmark
 
-THROUGHPUT_RE = re.compile("^Throughput =\\s*(?P<throughput>\\d+) operations per second.$")
+THROUGHPUT_RE = re.compile(
+    "^Throughput =\\s*(?P<throughput>\\d+) operations per second.$")
 
 
 class BenchmarkLarson(Benchmark):
     """Definition of the larson benchmark"""
-
     def __init__(self):
         name = "larson"
 
@@ -63,8 +62,11 @@ class BenchmarkLarson(Benchmark):
         # Applications" from Larson and Krishnan
         self.cmd = "larson{binary_suffix} 5 8 {maxsize} 1000 50000 1 {threads}"
 
-        self.args = {"maxsize": [64, 512, 1024],
-                     "threads": Benchmark.scale_threads_for_cpus(2)}
+        self.args = {
+            "maxsize": [64, 512, 1024],
+            "threads": Benchmark.scale_threads_for_cpus(2)
+        }
 
         self.requirements = ["larson"]
         super().__init__(name)
@@ -84,10 +85,11 @@ class BenchmarkLarson(Benchmark):
                             title="'Larson: ' + arg + ' ' + str(arg_value)",
                             filepostfix="throughput")
 
-        self.plot_fixed_arg("({L1-dcache-load-misses}/{L1-dcache-loads})*100",
-                            ylabel="'l1 cache misses in %'",
-                            title="'Larson cache misses: ' + arg + ' ' + str(arg_value)",
-                            filepostfix="cachemisses")
+        self.plot_fixed_arg(
+            "({L1-dcache-load-misses}/{L1-dcache-loads})*100",
+            ylabel="'l1 cache misses in %'",
+            title="'Larson cache misses: ' + arg + ' ' + str(arg_value)",
+            filepostfix="cachemisses")
 
 
 larson = BenchmarkLarson()
diff --git a/src/benchmarks/lld.py b/src/benchmarks/lld.py
index 39db15523802ea1d5b5a405a3d1f8113069f29ec..3657896d64736e77a619d5bc740e4da28bf32ab7 100644
--- a/src/benchmarks/lld.py
+++ b/src/benchmarks/lld.py
@@ -14,7 +14,6 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with allocbench.  If not, see <http://www.gnu.org/licenses/>.
-
 """llvm-lld speed benchmark
 
 This benchmark runs the lld speed benchmark provided by the llvm project.
@@ -210,7 +209,6 @@ from src.globalvars import summary_file_ext
 
 class BenchmarkLld(Benchmark):
     """LLVM-lld speed benchmark definition"""
-
     def __init__(self):
         name = "lld"
 
@@ -218,9 +216,15 @@ class BenchmarkLld(Benchmark):
         # TODO: don't hardcode ld.lld location
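+        # each test directory in lld-speed-test provides a response.txt with the full linker command line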
         self.cmd = "/usr/bin/ld.lld @response.txt"
 
-        self.args = {"test": ["chrome", "clang-fsds", "gold", "linux-kernel",
-                              "llvm-as-fsds", "scylla", "clang", "clang-gdb-index",
-                              "gold-fsds", "llvm-as", "mozilla"]}
+        self.args = {
+            "test": [
+                "chrome", "clang-fsds", "gold", "linux-kernel", "llvm-as-fsds",
+                "scylla", "clang", "clang-gdb-index", "gold-fsds", "llvm-as",
+                "mozilla"
+            ]
+        }
 
         self.measure_cmd = "perf stat -x, -d time -f %M,KB,VmHWM"
         self.measure_cmd_csv = True
@@ -231,49 +233,53 @@ class BenchmarkLld(Benchmark):
         super().prepare()
 
         # save lld version
-        self.results["facts"]["versions"]["lld"] = src.facter.exe_version("ld.lld", "-v")
+        self.results["facts"]["versions"]["lld"] = src.facter.exe_version(
+            "ld.lld", "-v")
 
-        tests = ArchiveArtifact("lld-speed-test",
-                                "https://s3-us-west-2.amazonaws.com/linker-tests/lld-speed-test.tar.xz",
-                                "tar",
-                                "2d449a11109c7363f67fd45513b42270f5ba2a92")
+        tests = ArchiveArtifact(
+            "lld-speed-test",
+            "https://s3-us-west-2.amazonaws.com/linker-tests/lld-speed-test.tar.xz",
+            "tar", "2d449a11109c7363f67fd45513b42270f5ba2a92")
         self.test_dir = tests.provide()
 
     def cleanup(self):
         for perm in self.iterate_args():
-            a_out = os.path.join(self.test_dir, "lld-speed-test", perm.test, "a.out")
+            a_out = os.path.join(self.test_dir, "lld-speed-test", perm.test,
+                                 "a.out")
             if os.path.isfile(a_out):
                 os.remove(a_out)
 
     def summary(self):
         args = self.results["args"]
         allocators = self.results["allocators"]
+        stats = self.results["stats"]
 
         for perm in self.iterate_args(args=args):
             for i, allocator in enumerate(allocators):
 
                 plt.bar([i],
-                        self.results["stats"][allocator][perm]["mean"]["task-clock"],
-                        yerr=self.results["stats"][allocator][perm]["std"]["task-clock"],
-                        label=allocator, color=allocators[allocator]["color"])
+                        stats[allocator][perm]["mean"]["task-clock"],
+                        yerr=stats[allocator][perm]["std"]["task-clock"],
+                        label=allocator,
+                        color=allocators[allocator]["color"])
 
             plt.legend(loc="best")
             plt.ylabel("time in ms")
             plt.title(f"Runtime {perm.test}")
-            plt.savefig(".".join([self.name, perm.test, "runtime", summary_file_ext]))
+            plt.savefig(f"{self.name}.{perm.test}.runtime.{summary_file_ext}")
             plt.clf()
 
-            for i, allocator in enumerate(allocators):
-
+            for i, alloc in enumerate(allocators):
                 plt.bar([i],
-                        self.results["stats"][allocator][perm]["mean"]["VmHWM"] / 1000,
-                        yerr=self.results["stats"][allocator][perm]["std"]["VmHWM"] / 1000,
-                        label=allocator, color=allocators[allocator]["color"])
+                        stats[alloc][perm]["mean"]["VmHWM"] / 1000,
+                        yerr=stats[alloc][perm]["std"]["VmHWM"] / 1000,
+                        label=alloc,
+                        color=allocators[alloc]["color"])
 
             plt.legend(loc="best")
             plt.ylabel("Max RSS in MB")
             plt.title(f"Max RSS {perm.test}")
-            plt.savefig(".".join([self.name, perm.test, "rss", summary_file_ext]))
+            plt.savefig(f"{self.name}.{perm.test}.rss.{summary_file_ext}")
             plt.clf()
 
         # self.export_stats_to_csv("VmHWM")
@@ -282,10 +288,12 @@ class BenchmarkLld(Benchmark):
         # self.export_stats_to_dataref("VmHWM")
         self.export_stats_to_dataref("task-clock")
 
-        self.write_tex_table([{"label": "Runtime [ms]",
-                               "expression": "{task-clock}",
-                               "sort": "<"}],
-                            filepostfix="table")
+        self.write_tex_table([{
+            "label": "Runtime [ms]",
+            "expression": "{task-clock}",
+            "sort": "<"
+        }],
+                             filepostfix="table")
 
 
 lld = BenchmarkLld()
diff --git a/src/benchmarks/loop.py b/src/benchmarks/loop.py
index 407aac12d74f284ac2e147c2588be27878ff0f87..4ab7d46446cad7a7b5e75972ad811785e2203435 100644
--- a/src/benchmarks/loop.py
+++ b/src/benchmarks/loop.py
@@ -14,7 +14,6 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with allocbench.  If not, see <http://www.gnu.org/licenses/>.
-
 """Definition of the loop micro benchmark
 
-This benchmark allocates and immediately deallocates a pseudo random sized allocation
+This benchmark allocates and immediately deallocates a pseudo-randomly sized allocation
@@ -43,14 +42,15 @@ class BenchmarkLoop(Benchmark):
 
     This benchmark allocates and frees n blocks in t concurrent threads.
     """
-
     def __init__(self):
         name = "loop"
 
         self.cmd = "loop{binary_suffix} {nthreads} 1000000 {maxsize}"
 
-        self.args = {"maxsize":  [2 ** x for x in range(6, 16)],
-                     "nthreads": Benchmark.scale_threads_for_cpus(2)}
+        self.args = {
+            "maxsize": [2**x for x in range(6, 16)],
+            "nthreads": Benchmark.scale_threads_for_cpus(2)
+        }
 
         self.requirements = ["loop"]
         super().__init__(name)
@@ -64,19 +64,23 @@ class BenchmarkLoop(Benchmark):
                             autoticks=False)
 
         # L1 cache misses
-        self.plot_fixed_arg("({L1-dcache-load-misses}/{L1-dcache-loads})*100",
-                            ylabel='"L1 misses in %"',
-                            title='"Loop l1 cache misses: " + arg + " " + str(arg_value)',
-                            filepostfix="l1misses",
-                            autoticks=False)
+        self.plot_fixed_arg(
+            "({L1-dcache-load-misses}/{L1-dcache-loads})*100",
+            ylabel='"L1 misses in %"',
+            title='"Loop l1 cache misses: " + arg + " " + str(arg_value)',
+            filepostfix="l1misses",
+            autoticks=False)
 
         # Speed Matrix
-        self.write_best_doublearg_tex_table("perm.nthreads / ({task-clock}/1000)",
-                                            filepostfix="time.matrix")
-
-        self.write_tex_table([{"label":  "MOPS/s",
-                               "expression": "perm.nthreads / ({task-clock}/1000)",
-                               "sort":">"}],
+        self.write_best_doublearg_tex_table(
+            "perm.nthreads / ({task-clock}/1000)", filepostfix="time.matrix")
+
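+        # each thread performs 10^6 allocations, so nthreads / (task-clock[ms]/1000) is million operations per second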
+        self.write_tex_table([{
+            "label": "MOPS/s",
+            "expression": "perm.nthreads / ({task-clock}/1000)",
+            "sort": ">"
+        }],
                              filepostfix="mops.table")
 
         self.export_stats_to_csv("task-clock")
diff --git a/src/benchmarks/mysql.py b/src/benchmarks/mysql.py
index c7519c5e5fd4aa91ba7e13c07c3779d2fba742a9..d5c0cb9255cb69bafaed8b0eb1b1d3ed7228797b 100644
--- a/src/benchmarks/mysql.py
+++ b/src/benchmarks/mysql.py
@@ -14,7 +14,6 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with allocbench.  If not, see <http://www.gnu.org/licenses/>.
-
 """sysbench SQL read-only benchmark
 
 This benchmark is heavily inspired by a blog post from Alexey Stroganov from Percona:
@@ -86,31 +85,35 @@
 import src.facter
 from src.util import print_status, print_debug, print_info2
 
-MYSQL_USER = "root"
-RUN_TIME = 300
+MYSQL_USER = "fischerling"
+RUN_TIME = 10
 TABLES = 5
 
-PREPARE_CMD = (f"sysbench oltp_read_only --db-driver=mysql --mysql-user={MYSQL_USER} "
-               f"--threads={multiprocessing.cpu_count()} "
-               f"--mysql-socket={{build_dir}}/socket --tables={TABLES} --table-size=1000000 prepare")
+PREPARE_CMD = (
+    f"sysbench oltp_read_only --db-driver=mysql --mysql-user={MYSQL_USER} "
+    f"--threads={multiprocessing.cpu_count()} "
+    f"--mysql-socket={{build_dir}}/socket --tables={TABLES} --table-size=1000000 prepare"
+)
 
-CMD = (f"sysbench oltp_read_only --threads={{nthreads}} --time={RUN_TIME} --tables={TABLES} "
-       f"--db-driver=mysql --mysql-user={MYSQL_USER} --mysql-socket={{build_dir}}/socket run")
+CMD = (
+    f"sysbench oltp_read_only --threads={{nthreads}} --time={RUN_TIME} --tables={TABLES} "
+    f"--db-driver=mysql --mysql-user={MYSQL_USER} --mysql-socket={{build_dir}}/socket run"
+)
 
-SERVER_CMD = ("mysqld --no-defaults -h {build_dir} --socket={build_dir}/socket --port=123456 "
-              f"--max-connections={multiprocessing.cpu_count()} --secure-file-priv=")
+SERVER_CMD = (
+    "mysqld --no-defaults -h {build_dir} --socket={build_dir}/socket --port=123456 "
+    f"--max-connections={multiprocessing.cpu_count()} --secure-file-priv=")
 
 
 class BenchmarkMYSQL(Benchmark):
     """Mysql bechmark definition"""
-
     def __init__(self):
         name = "mysql"
 
         self.args = {"nthreads": Benchmark.scale_threads_for_cpus(1)}
         self.cmd = CMD
-        self.servers = [{"name": "mysqld",
-                         "cmd" : SERVER_CMD}]
+        self.servers = [{"name": "mysqld", "cmd": SERVER_CMD}]
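+        # no measure_cmd: the sysbench client is not perf-wrapped; the server's memory usage is read in process_output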
         self.measure_cmd = ""
 
         self.requirements = ["mysqld", "sysbench"]
@@ -124,7 +126,8 @@ class BenchmarkMYSQL(Benchmark):
 
         # save mysqld and sysbench versions
         for exe in self.requirements:
-            self.results["facts"]["versions"][exe] = src.facter.exe_version(exe, "--version")
+            self.results["facts"]["versions"][exe] = src.facter.exe_version(
+                exe, "--version")
 
         # Setup Test Environment
         if not os.path.exists(self.build_dir):
@@ -133,10 +136,15 @@ class BenchmarkMYSQL(Benchmark):
 
             # Init database
             if "MariaDB" in self.results["facts"]["versions"]["mysqld"]:
-                init_db_cmd = ["mysql_install_db", "--basedir=/usr", f"--datadir={self.build_dir}"]
+                init_db_cmd = [
+                    "mysql_install_db", "--basedir=/usr",
+                    f"--datadir={self.build_dir}"
+                ]
                 print_info2("MariaDB detected")
             else:
-                init_db_cmd = ["mysqld", "-h", self.build_dir, "--initialize-insecure"]
+                init_db_cmd = [
+                    "mysqld", "-h", self.build_dir, "--initialize-insecure"
+                ]
                 print_info2("Oracle MySQL detected")
 
             p = subprocess.run(init_db_cmd, stdout=PIPE, stderr=PIPE)
@@ -150,37 +158,46 @@
             self.start_servers()
 
             # Create sbtest TABLE
-            p = subprocess.run(f"mysql -u {MYSQL_USER} -S {self.build_dir}/socket".split(),
-                               input=b"CREATE DATABASE sbtest;\n",
-                               stdout=PIPE, stderr=PIPE, cwd=self.build_dir)
+            p = subprocess.run(
+                f"mysql -u {MYSQL_USER} -S {self.build_dir}/socket".split(),
+                input=b"CREATE DATABASE sbtest;\n",
+                stdout=PIPE,
+                stderr=PIPE,
+                cwd=self.build_dir)
 
             if p.returncode != 0:
                 print_debug("Stderr:", p.stderr, file=sys.stderr)
-                raise Exception("Creating test tables failed with:", p.returncode)
+                raise Exception("Creating test tables failed with:",
+                                p.returncode)
 
             print_status("Prepare test tables ...")
             prepare_cmd = PREPARE_CMD.format(build_dir=self.build_dir)
             p = subprocess.run(prepare_cmd.split(), stdout=PIPE, stderr=PIPE)
             if p.returncode != 0:
-                print_debug(f"Cmd: {prepare_cmd} failed with {p.returncode}", file=sys.stderr)
+                print_debug(f"Cmd: {prepare_cmd} failed with {p.returncode}",
+                            file=sys.stderr)
                 print_debug("Stdout:", p.stdout, file=sys.stderr)
                 print_debug("Stderr:", p.stderr, file=sys.stderr)
-                raise Exception("Preparing test tables failed with:", p.returncode)
+                raise Exception("Preparing test tables failed with:",
+                                p.returncode)
 
             self.shutdown_servers()
 
     def process_output(self, result, stdout, stderr, allocator, perm):
-        result["transactions"] = re.search("transactions:\\s*(\\d*)", stdout).group(1)
+        result["transactions"] = re.search("transactions:\\s*(\\d*)",
+                                           stdout).group(1)
         result["queries"] = re.search("queries:\\s*(\\d*)", stdout).group(1)
         # Latency
         result["min"] = re.search("min:\\s*(\\d*.\\d*)", stdout).group(1)
         result["avg"] = re.search("avg:\\s*(\\d*.\\d*)", stdout).group(1)
         result["max"] = re.search("max:\\s*(\\d*.\\d*)", stdout).group(1)
 
-        with open("/proc/"+str(self.servers[0]["popen"].pid)+"/status", "r") as f:
+        with open(f"/proc/{self.servers[0]['popen'].pid}/status", "r") as f:
             for l in f.readlines():
                 if l.startswith("VmHWM:"):
-                    result["rssmax"] = int(l.replace("VmHWM:", "").strip().split()[0])
+                    result["rssmax"] = int(
+                        l.replace("VmHWM:", "").strip().split()[0])
                     break
 
     def summary(self):
@@ -225,22 +241,28 @@
                                 title='"Memusage sysbench oltp read only"',
                                 filepostfix="mem")
 
-        self.write_tex_table([{"label": "Transactions",
-                               "expression": "{transactions}",
-                               "sort": ">"},
-                              {"label": "Memusage [KB]",
-                               "expression": "{rssmax}",
-                               "sort": "<"}],
+        self.write_tex_table([{
+            "label": "Transactions",
+            "expression": "{transactions}",
+            "sort": ">"
+        }, {
+            "label": "Memusage [KB]",
+            "expression": "{rssmax}",
+            "sort": "<"
+        }],
                              filepostfix="table")
 
         # Colored latex table showing transactions count
         d = {allocator: {} for allocator in allocators}
         for perm in self.iterate_args(args=args):
             for allocator in allocators:
-                transactions = [float(measure["transactions"])
-                                for measure in self.results[allocator][perm]]
+                transactions = [
+                    float(measure["transactions"])
+                    for measure in self.results[allocator][perm]
+                ]
                 mean = np.mean(transactions)
-                std = np.std(transactions)/mean
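+                # normalize the deviation to the mean (coefficient of variation)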
+                std = np.std(transactions) / mean
                 d[allocator][perm] = {"mean": mean, "std": std}
 
         mins = {}
@@ -260,7 +281,7 @@ class BenchmarkMYSQL(Benchmark):
         fname = ".".join([self.name, "transactions.tex"])
         headers = [perm.nthreads for perm in self.iterate_args(args=args)]
         with open(fname, "w") as f:
-            print("\\begin{tabular}{| l" + " l"*len(headers) + " |}", file=f)
+            print("\\begin{tabular}{| l" + " l" * len(headers) + " |}", file=f)
             print("Fäden / Allokator ", end=" ", file=f)
             for head in headers:
                 print("& {}".format(head), end=" ", file=f)
diff --git a/src/benchmarks/raxmlng.py b/src/benchmarks/raxmlng.py
index 811721a874241014085c0e22867140ac94d285ec..dd405f55340e366c7165603f835ef6225ae08c42 100644
--- a/src/benchmarks/raxmlng.py
+++ b/src/benchmarks/raxmlng.py
@@ -14,7 +14,6 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with allocbench.  If not, see <http://www.gnu.org/licenses/>.
-
 """Definition of the RAxML-ng benchmark"""
 
 import os
@@ -27,21 +26,20 @@ from src.artifact import GitArtifact
 from src.benchmark import Benchmark
 from src.util import print_info
 
-
 RUNTIME_RE = re.compile("Elapsed time: (?P<runtime>(\\d*.\\d*)) seconds")
 
 
 class BenchmarkRaxmlng(Benchmark):
     """RAxML-ng benchmark
     """
-
     def __init__(self):
         name = "raxmlng"
 
         super().__init__(name)
-        
-        self.cmd = (f"raxml-ng --msa {self.build_dir}/data/prim.phy --model GTR+G"
-                    " --redo --threads 2 --seed 2")
+
+        self.cmd = (
+            f"raxml-ng --msa {self.build_dir}/data/prim.phy --model GTR+G"
+            " --redo --threads 2 --seed 2")
 
     def prepare(self):
         super().prepare()
@@ -49,36 +47,41 @@ class BenchmarkRaxmlng(Benchmark):
         if os.path.exists(self.build_dir):
             return
 
-        raxmlng_sources = GitArtifact("raxml-ng", "https://github.com/amkozlov/raxml-ng")
+        raxmlng_sources = GitArtifact("raxml-ng",
+                                      "https://github.com/amkozlov/raxml-ng")
         raxmlng_version = "0.9.0"
         raxmlng_dir = os.path.join(self.build_dir, "raxml-ng-git")
         raxmlng_builddir = os.path.join(raxmlng_dir, "build")
         self.results["facts"]["versions"]["raxml-ng"] = raxmlng_version
         raxmlng_sources.provide(raxmlng_version, raxmlng_dir)
-            
+
         # Create builddir
         os.makedirs(raxmlng_builddir, exist_ok=True)
 
         # building raxml-ng
-        proc = subprocess.run(["cmake", ".."],
-                              cwd=raxmlng_builddir,
-                              # stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                              universal_newlines=True)
-
-        proc = subprocess.run(["make"],
-                              cwd=raxmlng_builddir,
-                              # stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                              universal_newlines=True)
+        proc = subprocess.run(
+            ["cmake", ".."],
+            cwd=raxmlng_builddir,
+            # stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+            universal_newlines=True)
+
+        proc = subprocess.run(
+            ["make"],
+            cwd=raxmlng_builddir,
+            # stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+            universal_newlines=True)
 
         # create symlinks
         for exe in ["raxml-ng"]:
             src = os.path.join(raxmlng_dir, "bin", exe)
-            dest = os.path.join(self.build_dir,exe)
+            dest = os.path.join(self.build_dir, exe)
             os.link(src, dest)
 
-        raxmlng_data = GitArtifact("raxml-ng-data", "https://github.com/amkozlov/ng-tutorial")
+        raxmlng_data = GitArtifact("raxml-ng-data",
+                                   "https://github.com/amkozlov/ng-tutorial")
         raxmlng_data_dir = os.path.join(self.build_dir, "data")
-        raxmlng_data.provide("f8f0b6a057a11397b4dad308440746e3436db8b4", raxmlng_data_dir)
+        raxmlng_data.provide("f8f0b6a057a11397b4dad308440746e3436db8b4",
+                             raxmlng_data_dir)
 
     def cleanup(self):
         for direntry in os.listdir():
diff --git a/src/benchmarks/realloc.py b/src/benchmarks/realloc.py
index ca954d0bd6c0ac5a81c34fab6e236639dc83ae82..952913fde68cf2d4ce312063a718606000de7a05 100644
--- a/src/benchmarks/realloc.py
+++ b/src/benchmarks/realloc.py
@@ -14,7 +14,6 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with allocbench.  If not, see <http://www.gnu.org/licenses/>.
-
 """Definition of the realloc micro benchmark"""
 
 from src.benchmark import Benchmark
diff --git a/src/benchmarks/redis.py b/src/benchmarks/redis.py
index 4768c78f858fc78844ef23af94f275422c649dbf..827b14c5a64a82c871b339766ce74011c7455326 100644
--- a/src/benchmarks/redis.py
+++ b/src/benchmarks/redis.py
@@ -14,7 +14,6 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with allocbench.  If not, see <http://www.gnu.org/licenses/>.
-
 """Definition of the redis benchmark
 
 
@@ -32,20 +31,20 @@ from src.artifact import ArchiveArtifact
 from src.benchmark import Benchmark
 from src.util import print_info
 
-
 REQUESTS_RE = re.compile("(?P<requests>(\\d*.\\d*)) requests per second")
 
 
 class BenchmarkRedis(Benchmark):
     """Definition of the redis benchmark"""
-
     def __init__(self):
         name = "redis"
 
         self.cmd = "redis-benchmark 1000000 -n 1000000 -P 8 -q lpush a 1 2 3 4 5 6 7 8 9 10 lrange a 1 10"
-        self.servers = [{"name": "redis",
-                         "cmd": "redis-server",
-                         "shutdown_cmds": ["{build_dir}/redis-cli shutdown"]}]
+        self.servers = [{
+            "name": "redis",
+            "cmd": "redis-server",
+            "shutdown_cmds": ["{build_dir}/redis-cli shutdown"]
+        }]
 
         super().__init__(name)
 
@@ -54,19 +53,21 @@
 
         redis_version = "5.0.5"
         self.results["facts"]["versions"]["redis"] = redis_version
-        redis = ArchiveArtifact("redis",
-                                f"http://download.redis.io/releases/redis-{redis_version}.tar.gz",
-                                "tar",
-                                "71e38ae09ac70012b5bc326522b976bcb8e269d6")
+        redis = ArchiveArtifact(
+            "redis",
+            f"http://download.redis.io/releases/redis-{redis_version}.tar.gz",
+            "tar", "71e38ae09ac70012b5bc326522b976bcb8e269d6")
 
         redis_dir = os.path.join(self.build_dir, f"redis-{redis_version}")
 
         redis.provide(self.build_dir)
 
         # building redis
-        proc = subprocess.run(["make", "-C", redis_dir, "MALLOC=libc", "USE_JEMALLOC=no"],
-                              # stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                              universal_newlines=True)
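+        # build against libc malloc so the preloaded allocator under test is actually used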
+        proc = subprocess.run(
+            ["make", "-C", redis_dir, "MALLOC=libc", "USE_JEMALLOC=no"],
+            # stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+            universal_newlines=True)
 
         # create symlinks
         for exe in ["redis-cli", "redis-server", "redis-benchmark"]:
diff --git a/src/benchmarks/t_test1.py b/src/benchmarks/t_test1.py
index cbb9051cfb38324129c538bfb2757bef895d23a4..bdc7c83e4ac0f2b834d321bfdcbe8fd9fad9da72 100644
--- a/src/benchmarks/t_test1.py
+++ b/src/benchmarks/t_test1.py
@@ -14,7 +14,6 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with allocbench.  If not, see <http://www.gnu.org/licenses/>.
-
 """Definition of the commonly used t-test1 allocator test"""
 
 from src.benchmark import Benchmark
@@ -25,14 +24,15 @@ class BenchmarkTTest1(Benchmark):
 
     This benchmark from ptmalloc2 allocates and frees n bins in t concurrent threads.
     """
-
     def __init__(self):
         name = "t_test1"
 
         self.cmd = "t-test1 {nthreads} {nthreads} 1000000 {maxsize}"
 
-        self.args = {"maxsize":  [2 ** x for x in range(6, 18)],
-                     "nthreads": Benchmark.scale_threads_for_cpus(2)}
+        self.args = {
+            "maxsize": [2**x for x in range(6, 18)],
+            "nthreads": Benchmark.scale_threads_for_cpus(2)
+        }
 
         self.requirements = ["t-test1"]
         super().__init__(name)
@@ -48,18 +48,21 @@ class BenchmarkTTest1(Benchmark):
                             autoticks=False)
 
         # L1 cache misses
-        self.plot_fixed_arg("({L1-dcache-load-misses}/{L1-dcache-loads})*100",
-                            ylabel='"L1 misses in %"',
-                            title='"T-Test1 l1 cache misses: " + arg + " " + str(arg_value)',
-                            filepostfix="l1misses",
-                            autoticks=False)
+        self.plot_fixed_arg(
+            "({L1-dcache-load-misses}/{L1-dcache-loads})*100",
+            ylabel='"L1 misses in %"',
+            title='"T-Test1 l1 cache misses: " + arg + " " + str(arg_value)',
+            filepostfix="l1misses",
+            autoticks=False)
 
         # Speed Matrix
         self.write_best_doublearg_tex_table(yval, filepostfix="mops.matrix")
 
-        self.write_tex_table([{"label": "MOPS/s",
-                               "expression": yval,
-                               "sort": ">"}],
+        self.write_tex_table([{
+            "label": "MOPS/s",
+            "expression": yval,
+            "sort": ">"
+        }],
                              filepostfix="mops.table")
 
         self.export_stats_to_csv("task-clock")