diff --git a/src/allocator.py b/src/allocator.py
index d59264cab4a34eae981aee5470b85d8747eb4c5b..fd3fbbb10bd5efe0bd1cfb37964903a56c7f05d0 100644
--- a/src/allocator.py
+++ b/src/allocator.py
@@ -7,13 +7,14 @@
 import subprocess
 import sys
 
 import src.globalvars
-from src.util import *
+from src.util import print_status, print_debug, print_error, print_info2
 
 library_path = ""
 for l in subprocess.run(["ldconfig", "-v"], stdout=subprocess.PIPE,
-                    stderr=subprocess.PIPE,
-                    universal_newlines=True).stdout.splitlines():
+                        stderr=subprocess.PIPE,
+                        universal_newlines=True).stdout.splitlines():
+
     if not l.startswith('\t'):
         library_path += l
 
@@ -23,6 +24,7 @@ srcdir = os.path.join(builddir, "src")
 if not os.path.isdir(srcdir):
     os.makedirs(srcdir)
 
+
 class Allocator_Sources (object):
     def __init__(self, name, retrieve_cmds=[], prepare_cmds=[], reset_cmds=[]):
         self.name = name
@@ -45,7 +47,7 @@ class Allocator_Sources (object):
 
             if p.returncode:
                 print_error(function, self.name, "failed with", p.returncode,
-                           file=sys.stderr)
+                            file=sys.stderr)
                 print_debug(p.stderr, file=sys.stderr)
                 return False
         return True
@@ -79,7 +81,8 @@ class Allocator_Sources (object):
                                input=f.read())
 
             if p.returncode:
-                print_error("Patching of", self.name, "failed.", file=sys.stderr)
+                print_error("Patching of", self.name, "failed.",
+                            file=sys.stderr)
                 print_debug(p.stderr, file=sys.stderr)
                 exit(1)
 
@@ -92,7 +95,8 @@ class Allocator (object):
         self.name = name
         self.dir = os.path.join(builddir, self.name)
         # Update attributes
-        self.__dict__.update((k, v) for k, v in kwargs.items() if k in self.allowed_attributes)
+        self.__dict__.update((k, v) for k, v in kwargs.items()
+                             if k in self.allowed_attributes)
 
         # create all unset attributes
         for attr in self.allowed_attributes:
@@ -115,7 +119,8 @@ class Allocator (object):
             build_needed = timestamp < modtime
 
             print_debug("Time of last build:", timestamp.isoformat())
-            print_debug("Last modification of allocators file:", modtime.isoformat())
+            print_debug("Last modification of allocators file:",
+                        modtime.isoformat())
             print_info2("Build needed:", build_needed)
 
         if build_needed:
@@ -150,14 +155,14 @@ class Allocator (object):
             try:
                 value = getattr(self, attr)
                 setattr(self, attr, value.format(**{"dir": self.dir,
-                                                   "srcdir": self.sources.dir}))
+                                                    "srcdir": self.sources.dir}))
             except AttributeError:
                 setattr(self, attr, "")
 
-        res_dict = {"cmd_prefix": self.cmd_prefix,
-                   "binary_suffix": self.binary_suffix or "",
-                   "LD_PRELOAD": self.LD_PRELOAD,
-                   "color": self.color}
+        res_dict = {"cmd_prefix": self.cmd_prefix,
+                    "binary_suffix": self.binary_suffix or "",
+                    "LD_PRELOAD": self.LD_PRELOAD,
+                    "color": self.color}
 
         print_debug("Resulting dictionary:", res_dict)
         return res_dict
diff --git a/src/benchmark.py b/src/benchmark.py
index 709005a95104d88412dfe483aa278f9d0fe2d6e9..86445a932f363a67728a0b5e4a04475cffb77011 100644
--- a/src/benchmark.py
+++ b/src/benchmark.py
@@ -7,20 +7,18 @@
 import multiprocessing
 import numpy as np
 import os
 import pickle
-import shutil
 import subprocess
 from time import sleep
 
 import src.globalvars
 import src.util
-from src.util import *
+from src.util import print_status, print_error, print_warn
+from src.util import print_info0, print_info, print_debug
 
-
-# This is useful when evaluating strings in the plot functionsi. str(np.NaN) == "nan"
+# This is useful when evaluating strings in the plot functions. str(np.NaN) == "nan"
str(np.NaN) == "nan" nan = np.NaN - class Benchmark (object): perf_allowed = None @@ -48,8 +46,8 @@ class Benchmark (object): popen.terminate() try: print_info("Subprocess exited with ", popen.wait(timeout=timeout)) - except: - print_error("Killing subprocess ", server.args) + except subprocess.TimeoutExpired: + print_error("Killing subprocess ", popen.args) popen.kill() popen.wait() print_debug("Server Out:", popen.stdout) @@ -122,9 +120,12 @@ class Benchmark (object): measures = [] stats = [] for ntuple in self.iterate_args(args=self.results["args"]): - measures.append((ntuple._asdict(), self.results[allocator][ntuple])) + measures.append((ntuple._asdict(), + self.results[allocator][ntuple])) + if "stats" in self.results: - stats.append((ntuple._asdict(), self.results["stats"][allocator][ntuple])) + stats.append((ntuple._asdict(), + self.results["stats"][allocator][ntuple])) save_data[allocator] = measures if "stats" in self.results: @@ -159,14 +160,14 @@ class Benchmark (object): self.results["stats"][allocator] = d # add missing statistics - if not "stats" in self.results: + if "stats" not in self.results: self.calc_desc_statistics() def prepare(self): - os.environ["PATH"] += os.pathsep + os.path.join("build", "benchmarks", self.name) + os.environ["PATH"] += os.pathsep + "build/benchmarks/" + self.name for r in self.requirements: - exe = find_cmd(r) + exe = src.util.find_cmd(r) if exe is not None: self.results["facts"]["libcs"][r] = src.facter.libc_ver(bin=exe) else: @@ -210,8 +211,8 @@ class Benchmark (object): server_cmd = src.util.prefix_cmd_with_abspath(server_cmd) server_cmd = "{} {} {}".format(self.measure_cmd, - alloc["cmd_prefix"], - server_cmd) + alloc["cmd_prefix"], + server_cmd) server_cmd = server_cmd.format(**substitutions) print_debug(server_cmd) @@ -221,7 +222,7 @@ class Benchmark (object): stderr=subprocess.PIPE, universal_newlines=True) - #TODO check if server is up + # TODO: check if server is up sleep(5) ret = server.poll() @@ -242,7 +243,7 @@ class Benchmark (object): # check if perf is allowed on this system if self.measure_cmd == self.defaults["measure_cmd"]: - if Benchmark.perf_allowed == None: + if Benchmark.perf_allowed is None: print_info("Check if you are allowed to use perf ...") res = subprocess.run(["perf", "stat", "ls"], stdout=subprocess.PIPE, @@ -251,7 +252,7 @@ class Benchmark (object): if res.returncode != 0: print_error("Test perf run failed with:") - print(res.stderr, file=sys.stderr) + print_debug(res.stderr) Benchmark.perf_allowed = False else: Benchmark.perf_allowed = True @@ -281,7 +282,7 @@ class Benchmark (object): # Preallocator hook if hasattr(self, "preallocator_hook"): self.preallocator_hook((alloc_name, alloc), run, env, - verbose=src.globalvars.verbosity) + verbose=src.globalvars.verbosity) # Run benchmark for alloc for perm in self.iterate_args(): @@ -301,8 +302,8 @@ class Benchmark (object): if self.server_cmds == []: actual_cmd = src.util.prefix_cmd_with_abspath(actual_cmd) actual_cmd = "{} {} {}".format(self.measure_cmd, - alloc["cmd_prefix"], - actual_cmd) + alloc["cmd_prefix"], + actual_cmd) # substitute again actual_cmd = actual_cmd.format(**substitutions) @@ -329,22 +330,23 @@ class Benchmark (object): # parse and store results else: if self.server_cmds == []: - # Read VmHWM from status file. If our benchmark didn't fork - # the first occurance of VmHWM is from our benchmark + # Read VmHWM from status file. 
+                        # didn't fork the first occurance of VmHWM is from
+                        # our benchmark
                         with open("status", "r") as f:
                             for l in f.readlines():
                                 if l.startswith("VmHWM:"):
                                     result["VmHWM"] = l.split()[1]
                                     break
                         os.remove("status")
-                    # TODO get VmHWM from servers
+                    # TODO: get VmHWM from servers
                     else:
                         result["server_status"] = []
                         for server in self.servers:
                             with open("/proc/{}/status".format(server.pid), "r") as f:
                                 result["server_status"].append(f.read())
 
-                    # Parse perf output if available
+                    # parse perf output if available
                     if self.measure_cmd == self.defaults["measure_cmd"]:
                         csvreader = csv.reader(res.stderr.splitlines(),
                                                delimiter=',')
@@ -362,10 +364,10 @@ class Benchmark (object):
                                             verbose=src.globalvars.verbosity)
 
                         # save a valid result so we can expand invalid ones
-                        if valid_result != None:
+                        if valid_result is not None:
                             valid_result = result
 
-                    if not perm in self.results[alloc_name]:
+                    if perm not in self.results[alloc_name]:
                         self.results[alloc_name][perm] = []
                     self.results[alloc_name][perm].append(result)
 
@@ -377,10 +379,10 @@ class Benchmark (object):
 
             print()
 
-            # Reset PATH
+            # reset PATH
            os.environ["PATH"] = os.environ["PATH"].replace(":build/" + self.name, "")
 
-            #expand invalid results
+            # expand invalid results
             if valid_result != {}:
                 for allocator in self.allocators:
                     for perm in self.iterate_args():
@@ -454,7 +456,6 @@ class Benchmark (object):
                 else:
                     y_vals.append(eval(yval.format(**self.results["stats"][allocator][perm]["mean"])))
 
-
             plt.plot(x_vals, y_vals, marker='.', linestyle='-',
                      label=allocator, color=allocators[allocator]["color"])
 
@@ -468,8 +469,8 @@ class Benchmark (object):
         plt.clf()
 
     def barplot_single_arg(self, yval, ylabel="'y-label'", xlabel="'x-label'",
-                          title="'default title'", filepostfix="", sumdir="",
-                          arg="", scale=None, file_ext="png"):
+                           title="'default title'", filepostfix="", sumdir="",
+                           arg="", scale=None, file_ext="png"):
 
         args = self.results["args"]
         allocators = self.results["allocators"]
@@ -478,7 +479,6 @@ class Benchmark (object):
         arg = arg or list(args.keys())[0]
         narg = len(args[arg])
 
-
         for i, allocator in enumerate(allocators):
             x_vals = list(range(i, narg * (nallocators+1), nallocators+1))
             y_vals = []
@@ -493,7 +493,6 @@ class Benchmark (object):
                 else:
                     y_vals.append(eval(yval.format(**self.results["stats"][allocator][perm]["mean"])))
 
-
             plt.bar(x_vals, y_vals, width=1, label=allocator,
                     color=allocators[allocator]["color"])
 
@@ -536,7 +535,6 @@ class Benchmark (object):
                     eval_str = yval.format(**eval_dict)
                     y_vals.append(eval(eval_str))
 
-
             plt.plot(x_vals, y_vals, marker='.', linestyle='-',
                      label=allocator, color=allocators[allocator]["color"])
 
@@ -583,7 +581,7 @@ class Benchmark (object):
             for alloc in allocators:
                 for perm in self.iterate_args(args=args):
                     field_len = len(str(rows[alloc][perm][i])) + 2
-                    if field_len > widths[i]: 
+                    if field_len > widths[i]:
                         widths[i] = field_len
 
         with open(path, "w") as f:
@@ -610,7 +608,7 @@ class Benchmark (object):
         path = path + ".dataref"
 
         # Example: \drefset{/mysql/glibc/40/Lower-whisker}{71552.0}
-        line = "\drefset{{/{}/{}/{}/{}}}{{{}}}"
+        line = "\\drefset{{/{}/{}/{}/{}}}{{{}}}"
 
         with open(path, "w") as f:
             for alloc in allocators:
diff --git a/src/benchmarks/dj_trace.py b/src/benchmarks/dj_trace.py
index b808f4da817ccaa2415d8ae779914a478a0cd2a7..4d08de42e99912c20b4b75c1361bd0f691a6808c 100644
--- a/src/benchmarks/dj_trace.py
+++ b/src/benchmarks/dj_trace.py
@@ -8,7 +8,7 @@ import re
 from src.benchmark import Benchmark
 from src.util import print_status
 
-comma_sep_number_re = "(?:\d*(?:,\d*)?)*"
+comma_sep_number_re = "(?:\\d*(?:,\\d*)?)*"
"(?:\\d*(?:,\\d*)?)*" rss_re = "(?P<rss>" + comma_sep_number_re + ")" time_re = "(?P<time>" + comma_sep_number_re + ")" @@ -18,10 +18,10 @@ cpu_time_re = re.compile("^{} usec across.*threads$".format(time_re)) max_rss_re = re.compile("^{} Kb Max RSS".format(rss_re)) ideal_rss_re = re.compile("^{} Kb Max Ideal RSS".format(rss_re)) -malloc_re = re.compile("^Avg malloc time:\s*{} in.*calls$".format(time_re)) -calloc_re = re.compile("^Avg calloc time:\s*{} in.*calls$".format(time_re)) -realloc_re = re.compile("^Avg realloc time:\s*{} in.*calls$".format(time_re)) -free_re = re.compile("^Avg free time:\s*{} in.*calls$".format(time_re)) +malloc_re = re.compile("^Avg malloc time:\\s*{} in.*calls$".format(time_re)) +calloc_re = re.compile("^Avg calloc time:\\s*{} in.*calls$".format(time_re)) +realloc_re = re.compile("^Avg realloc time:\\s*{} in.*calls$".format(time_re)) +free_re = re.compile("^Avg free time:\\s*{} in.*calls$".format(time_re)) class Benchmark_DJ_Trace(Benchmark): @@ -111,16 +111,16 @@ class Benchmark_DJ_Trace(Benchmark): file_name = wl + ".wl" file_path = os.path.join("dj_workloads", file_name) if not os.path.isfile(file_path): - if download_all == None: - choice = input(("Download all missing workloads (upto 6.7GB)" - " [Y/n/x] ")) + if download_all is None: + choice = input(("Download all missing workloads" + " (upto 6.7GB) [Y/n/x] ")) if choice == "x": break else: download_all = choice in ['', 'Y', 'y'] if (not download_all and - input("want to download {} ({}) [Y/n] ".format(wl, wl_sizes[wl])) not in ['', 'Y', 'y']): + input("want to download {} ({}) [Y/n] ".format(wl, wl_sizes[wl])) not in ['', 'Y', 'y']): continue if download_all: @@ -140,7 +140,7 @@ class Benchmark_DJ_Trace(Benchmark): if len(available_workloads) > 0: self.args["workload"] = available_workloads return True - + return False def process_output(self, result, stdout, stderr, allocator, perm, verbose): @@ -187,7 +187,6 @@ class Benchmark_DJ_Trace(Benchmark): plt.savefig(".".join([self.name, perm.workload, "runtime", "png"])) plt.clf() - self.barplot_single_arg("{cputime}/1000", ylabel='"time in ms"', title='"total runtime"', @@ -200,7 +199,7 @@ class Benchmark_DJ_Trace(Benchmark): for i, allocator in enumerate(allocators): x_vals = [x+i/len(allocators) for x in xa] - func_times_means[allocator][perm] = [0,0,0,0] + func_times_means[allocator][perm] = [0, 0, 0, 0] func_times_means[allocator][perm][0] = np.mean([x["avg_malloc"] for x in self.results[allocator][perm]]) func_times_means[allocator][perm][1] = np.mean([x["avg_calloc"] for x in self.results[allocator][perm]]) @@ -230,7 +229,6 @@ class Benchmark_DJ_Trace(Benchmark): ideal_rss = self.results[list(allocators.keys())[0]][perm][0]["Ideal_RSS"]/1000 self.results["stats"]["Ideal_RSS"][perm] = {"mean": {"Max_RSS": ideal_rss}} - self.barplot_single_arg("{Max_RSS}/1000", ylabel='"Max RSS in MB"', title='"Highwatermark of Vm (VmHWM)"', @@ -315,7 +313,7 @@ class Benchmark_DJ_Trace(Benchmark): color = "black" print(s.format(color, m, np.std(t)/m if m else 0), "\\\\", file=f) - print("\end{tabular}", file=f) + print("\\end{tabular}", file=f) print("\\end{document}", file=f) # Create summary similar to DJ's at @@ -377,7 +375,7 @@ class Benchmark_DJ_Trace(Benchmark): times[1], times[2], times[3], rss), file=f) print(file=f) - tmeans = [0,0,0,0] + tmeans = [0, 0, 0, 0] for i in range(0, len(times)): tmeans[i] = np.mean([times[i] for times in times_change_means]) print(fmt_changes.format("Mean:", np.mean(cycles_change_means), diff --git a/src/benchmarks/falsesharing.py 
index f1d69ca3f88de612418cbf88d084f8ff9393e880..85d0a92085056d2cfc568b9e54e82e75253b5215 100644
--- a/src/benchmarks/falsesharing.py
+++ b/src/benchmarks/falsesharing.py
@@ -4,7 +4,7 @@ import re
 
 from src.benchmark import Benchmark
 
-time_re = re.compile("^Time elapsed = (?P<time>\d*\.\d*) seconds.$")
+time_re = re.compile("^Time elapsed = (?P<time>\\d*\\.\\d*) seconds.$")
 
 
 class Benchmark_Falsesharing(Benchmark):
diff --git a/src/benchmarks/httpd.py b/src/benchmarks/httpd.py
index 0e386cc86884c8ea6552b42951986aefb58145c3..2ac7fbcbacc61fdfceebff3c5b8bf72d3dee3862 100644
--- a/src/benchmarks/httpd.py
+++ b/src/benchmarks/httpd.py
@@ -1,14 +1,6 @@
-import matplotlib.pyplot as plt
-import numpy as np
 import re
-import shutil
-import subprocess
-from subprocess import PIPE
-import sys
-from time import sleep
 
 from src.benchmark import Benchmark
-from src.util import *
 
 
 class Benchmark_HTTPD(Benchmark):
@@ -28,8 +20,8 @@ class Benchmark_HTTPD(Benchmark):
         super().__init__()
 
     def process_output(self, result, stdout, stderr, allocator, perm, verbose):
-        result["time"] = re.search("Time taken for tests:\s*(\d*\.\d*) seconds", stdout).group(1)
-        result["requests"] = re.search("Requests per second:\s*(\d*\.\d*) .*", stdout).group(1)
+        result["time"] = re.search("Time taken for tests:\\s*(\\d*\\.\\d*) seconds", stdout).group(1)
+        result["requests"] = re.search("Requests per second:\\s*(\\d*\\.\\d*) .*", stdout).group(1)
 
         # with open("/proc/"+str(self.server.pid)+"/status", "r") as f:
         #     for l in f.readlines():
@@ -39,34 +31,33 @@ class Benchmark_HTTPD(Benchmark):
 
     def summary(self):
         allocators = self.results["allocators"]
-        args = self.results["args"]
 
         self.calc_desc_statistics()
 
         # linear plot
         self.plot_fixed_arg("{requests}",
-                           xlabel='"threads"',
-                           ylabel='"requests/s"',
-                           autoticks=False,
-                           filepostfix="requests",
-                           title='"ab -n 10000 -c " + str(perm.nthreads)')
+                            xlabel='"threads"',
+                            ylabel='"requests/s"',
+                            autoticks=False,
+                            filepostfix="requests",
+                            title='"ab -n 10000 -c " + str(perm.nthreads)')
 
         # linear plot
         ref_alloc = list(allocators)[0]
         self.plot_fixed_arg("{requests}",
-                           xlabel='"threads"',
-                           ylabel='"requests/s scaled at " + scale',
-                           title='"ab -n 10000 -c " + str(perm.nthreads) + " (normalized)"',
-                           filepostfix="requests.norm",
-                           autoticks=False,
-                           scale=ref_alloc)
+                            xlabel='"threads"',
+                            ylabel='"requests/s scaled at " + scale',
+                            title='"ab -n 10000 -c " + str(perm.nthreads) + " (normalized)"',
+                            filepostfix="requests.norm",
+                            autoticks=False,
+                            scale=ref_alloc)
 
         # bar plot
         # self.barplot_fixed_arg("{requests}",
-        #                       xlabel='"threads"',
-        #                       ylabel='"requests/s"',
-        #                       filepostfix="b",
-        #                       title='"ab -n 10000 -c threads"')
+        #                        xlabel='"threads"',
+        #                        ylabel='"requests/s"',
+        #                        filepostfix="b",
+        #                        title='"ab -n 10000 -c threads"')
 
 
 httpd = Benchmark_HTTPD()
diff --git a/src/benchmarks/larson.py b/src/benchmarks/larson.py
index e070b7bc4d60f9b9b2a1df0226575162971d0aec..910fdf6d9e66ef1a8614a1e354d9fae52f03d21d 100644
--- a/src/benchmarks/larson.py
+++ b/src/benchmarks/larson.py
@@ -2,7 +2,7 @@ import re
 
 from src.benchmark import Benchmark
 
-throughput_re = re.compile("^Throughput =\s*(?P<throughput>\d+) operations per second.$")
+throughput_re = re.compile("^Throughput =\\s*(?P<throughput>\\d+) operations per second.$")
 
 
 class Benchmark_Larson(Benchmark):
@@ -43,4 +43,5 @@ class Benchmark_Larson(Benchmark):
                                  title="'Larson cache misses: ' + arg + ' ' + str(arg_value)",
                                  filepostfix="cachemisses")
 
+
 larson = Benchmark_Larson()
diff --git a/src/benchmarks/mysql.py b/src/benchmarks/mysql.py
index fb9c2e7059a450ff72fd43ca3ea6a147a602d28f..d8a5a836e311cb2bd5afa038cf23c4cb93be4245 100644
--- a/src/benchmarks/mysql.py
+++ b/src/benchmarks/mysql.py
@@ -1,5 +1,4 @@
 import copy
-import matplotlib.pyplot as plt
 import multiprocessing
 import numpy as np
 import os
@@ -8,11 +7,10 @@ import shutil
 import subprocess
 from subprocess import PIPE
 import sys
-from time import sleep
 
 from src.globalvars import allocators
 from src.benchmark import Benchmark
-from src.util import *
+from src.util import print_status, print_debug, print_info2
 
 cwd = os.getcwd()
 
@@ -75,7 +73,7 @@ class Benchmark_MYSQL(Benchmark):
         self.start_servers()
 
         # Create sbtest TABLE
-        p = subprocess.run(("mysql -u root -S "+cwd+"/mysql_test/socket").split(" "),
+        p = subprocess.run(("mysql -u root -S " + cwd + "/mysql_test/socket").split(" "),
                            input=b"CREATE DATABASE sbtest;\n",
                            stdout=PIPE, stderr=PIPE)
 
@@ -101,12 +99,12 @@ class Benchmark_MYSQL(Benchmark):
         shutil.rmtree("mysql_test", ignore_errors=True)
 
     def process_output(self, result, stdout, stderr, allocator, perm, verbose):
-        result["transactions"] = re.search("transactions:\s*(\d*)", stdout).group(1)
-        result["queries"] = re.search("queries:\s*(\d*)", stdout).group(1)
+        result["transactions"] = re.search("transactions:\\s*(\\d*)", stdout).group(1)
+        result["queries"] = re.search("queries:\\s*(\\d*)", stdout).group(1)
         # Latency
-        result["min"] = re.search("min:\s*(\d*.\d*)", stdout).group(1)
-        result["avg"] = re.search("avg:\s*(\d*.\d*)", stdout).group(1)
-        result["max"] = re.search("max:\s*(\d*.\d*)", stdout).group(1)
+        result["min"] = re.search("min:\\s*(\\d*.\\d*)", stdout).group(1)
+        result["avg"] = re.search("avg:\\s*(\\d*.\\d*)", stdout).group(1)
+        result["max"] = re.search("max:\\s*(\\d*.\\d*)", stdout).group(1)
 
         with open("/proc/"+str(self.servers[0].pid)+"/status", "r") as f:
             for l in f.readlines():
@@ -125,7 +123,6 @@ class Benchmark_MYSQL(Benchmark):
                                  title='"sysbench oltp read only"',
                                  filepostfix="l")
 
-
         # normalized linear plot
         ref_alloc = list(allocators)[0]
         self.plot_single_arg("{transactions}",
@@ -152,10 +149,10 @@ class Benchmark_MYSQL(Benchmark):
 
         # Memusage
         self.barplot_single_arg("{rssmax}",
-                               xlabel='"threads"',
-                               ylabel='"VmHWM in kB"',
-                               title='"Memusage sysbench oltp read only"',
-                               filepostfix="mem")
+                                xlabel='"threads"',
+                                ylabel='"VmHWM in kB"',
+                                title='"Memusage sysbench oltp read only"',
+                                filepostfix="mem")
 
         # Colored latex table showing transactions count
         d = {allocator: {} for allocator in allocators}
@@ -203,7 +200,7 @@ class Benchmark_MYSQL(Benchmark):
                     print(s.format(color, m), end=" ", file=f)
                 print("\\\\", file=f)
 
-            print("\end{tabular}", file=f)
+            print("\\end{tabular}", file=f)
 
         self.export_to_csv("transactions")
         self.export_to_dataref("transactions")
diff --git a/src/benchmarks/realloc.py b/src/benchmarks/realloc.py
index 423bf442b42f30887f72f6c0b7eb61f99781339a..7132d684f43020e98697190c7e93838f2f984cf2 100644
--- a/src/benchmarks/realloc.py
+++ b/src/benchmarks/realloc.py
@@ -1,5 +1,3 @@
-import matplotlib.pyplot as plt
-
 from src.benchmark import Benchmark
 
 
@@ -16,9 +14,6 @@ class Benchmark_Realloc(Benchmark):
         super().__init__()
 
     def summary(self):
-        # bar plot
-        allocators = self.results["allocators"]
-
         self.barplot_single_arg("{task-clock}",
                                 ylabel='"task-clock in ms"',
                                 title='"realloc micro benchmark"')
diff --git a/src/facter.py b/src/facter.py
index 220cb3a86b2679f4b597a6908d2eac8d37c97433..300e12fb7b2535959628156facd82b0055437eef 100644
--- a/src/facter.py
+++ b/src/facter.py
@@ -19,6 +19,7 @@ def collect_facts():
     with open(os.path.join(gv.builddir, "ccinfo"), "r") as ccinfo:
         gv.facts["cc"] = ccinfo.readlines()[-1][:-1]
 
+
 # Copied from pip.
 # https://github.com/pypa/pip/blob/master/src/pip/_internal/utils/glibc.py
 # Licensed under MIT.
@@ -46,6 +47,7 @@ def glibc_version_string(bin=None):
 
     return version_str
 
+
 # platform.libc_ver regularly returns completely nonsensical glibc
 # versions. E.g. on my computer, platform says:
 #
diff --git a/src/globalvars.py b/src/globalvars.py
index 6048f1c34ea07007b96f03b207a35ac82f1bb732..23ffa8a1da7e9b7172e1af5cb8ce36061f306eaf 100644
--- a/src/globalvars.py
+++ b/src/globalvars.py
@@ -11,9 +11,6 @@ verbosity = 0
 """Dict holding the allocators to compare"""
 allocators = {}
 
-"""File were the allocators definitions are loaded from"""
-allocators_file = None
-
 """Root directory of allocbench"""
 allocbenchdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
 allocbenchdir = os.path.dirname(allocbenchdir)
diff --git a/src/util.py b/src/util.py
index 812d9809608d0aa98150ac8679f06d47e5ee8520..29aa234b2687c14675ed3766f5ac9db7751580fa 100644
--- a/src/util.py
+++ b/src/util.py
@@ -8,6 +8,7 @@ import src.globalvars
 
 def is_exe(fpath):
     return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
 
+
 def find_cmd(cmd):
     fpath, fname = os.path.split(cmd)
@@ -24,6 +25,7 @@ def find_cmd(cmd):
 
     return None
 
+
 def prefix_cmd_with_abspath(cmd):
     """Prefix cmd with the abspath of the first word
 
@@ -32,8 +34,6 @@ def prefix_cmd_with_abspath(cmd):
     binary_end = cmd.find(" ")
     binary_end = None if binary_end == -1 else binary_end
 
-    cmd_start = len(cmd) if binary_end == None else binary_end
-
     binary_abspath = subprocess.run(["whereis", cmd[0:binary_end]],
                                     stdout=subprocess.PIPE,
                                     universal_newlines=True).stdout.split()[1]
@@ -45,7 +45,6 @@ def allocbench_msg(color, *objects, sep=' ', end='\n', file=sys.stdout):
     if src.globalvars.verbosity < 0:
         return
 
-
     color = {"YELLOW": "\x1b[33m",
              "GREEN": "\x1b[32m",
              "RED": "\x1b[31m"}[color]
@@ -59,33 +58,40 @@ def allocbench_msg(color, *objects, sep=' ', end='\n', file=sys.stdout):
     if is_atty:
         print("\x1b[0m", end="", file=file, flush=True)
 
+
 def print_debug(*objects, sep=' ', end='\n', file=sys.stdout):
     if src.globalvars.verbosity < 99:
         return
     print(*objects, sep=sep, end=end, file=file)
 
+
 def print_info(*objects, sep=' ', end='\n', file=sys.stdout):
     if src.globalvars.verbosity < 1:
         return
     print(*objects, sep=sep, end=end, file=file)
 
+
 def print_info0(*objects, sep=' ', end='\n', file=sys.stdout):
     if src.globalvars.verbosity < 0:
         return
     print(*objects, sep=sep, end=end, file=file)
 
+
 def print_info2(*objects, sep=' ', end='\n', file=sys.stdout):
     if src.globalvars.verbosity < 2:
         return
     print(*objects, sep=sep, end=end, file=file)
 
+
 def print_status(*objects, sep=' ', end='\n', file=sys.stdout):
     allocbench_msg("GREEN", *objects, sep=sep, end=end, file=file)
 
+
 def print_warn(*objects, sep=' ', end='\n', file=sys.stdout):
     if src.globalvars.verbosity < 1:
         return
     allocbench_msg("YELLOW", *objects, sep=sep, end=end, file=file)
 
+
 def print_error(*objects, sep=' ', end='\n', file=sys.stderr):
     allocbench_msg("RED", *objects, sep=sep, end=end, file=file)
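
For reference, the bare "except:" -> "except subprocess.TimeoutExpired:" change
in src/benchmark.py above is the usual terminate-then-kill escalation for
shutting down a benchmark server process. A minimal, self-contained sketch of
that pattern, assuming a plain subprocess.Popen handle (the shutdown() helper
below is hypothetical and not part of allocbench):

    import subprocess


    def shutdown(popen, timeout=5):
        """Ask the process to exit; hard-kill it only if it ignores us."""
        popen.terminate()
        try:
            # wait(timeout=...) raises subprocess.TimeoutExpired on timeout.
            print("Subprocess exited with", popen.wait(timeout=timeout))
        except subprocess.TimeoutExpired:
            # Only a timeout triggers the kill; other errors still propagate,
            # which is the point of narrowing the bare "except:".
            print("Killing subprocess", popen.args)
            popen.kill()
            popen.wait()


    if __name__ == "__main__":
        proc = subprocess.Popen(["sleep", "60"])
        shutdown(proc, timeout=1)

Note that the original bare "except:" also silently swallowed the NameError
from the undefined "server" variable; the patch fixes both by catching only
subprocess.TimeoutExpired and by referencing popen.args.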