"""Utilities and tools for tracking runs with Weights & Biases."""

import logging
import os
import sys
from contextlib import contextmanager
from pathlib import Path

import yaml
from tqdm import tqdm

FILE = Path(__file__).absolute()
sys.path.append(FILE.parents[3].as_posix())  # add yolov5/ to path

from utils.datasets import LoadImagesAndLabels, img2label_paths
from utils.general import check_dataset, check_file

try:
    import wandb

    assert hasattr(wandb, '__version__')  # verify package import not local dir
except (ImportError, AssertionError):
    wandb = None

RANK = int(os.getenv('RANK', -1))
WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'


def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX):
    return from_string[len(prefix):]
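
# Example (hypothetical path): remove_prefix('wandb-artifact://my-entity/my-project/abc123')
# returns 'my-entity/my-project/abc123'.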


def check_wandb_config_file(data_config_file):
    wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1))  # updated data.yaml path
    if Path(wandb_config).is_file():
        return wandb_config
    return data_config_file
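
# Example: for 'data/coco.yaml' this checks for 'data/coco_wandb.yaml' and returns that path
# if the file exists, otherwise the original path.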


def get_run_info(run_path):
    run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX))
    run_id = run_path.stem
    project = run_path.parent.stem
    entity = run_path.parent.parent.stem
    model_artifact_name = 'run_' + run_id + '_model'
    return entity, project, run_id, model_artifact_name
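
# Example (hypothetical path): get_run_info('wandb-artifact://my-entity/my-project/abc123')
# returns ('my-entity', 'my-project', 'abc123', 'run_abc123_model').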


def check_wandb_resume(opt):
    if RANK not in [-1, 0]:
        process_wandb_config_ddp_mode(opt)
    if isinstance(opt.resume, str):
        if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
            if RANK not in [-1, 0]:  # For resuming DDP runs
                entity, project, run_id, model_artifact_name = get_run_info(opt.resume)
                api = wandb.Api()
                artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest')
                modeldir = artifact.download()
                opt.weights = str(Path(modeldir) / "last.pt")
            return True
    return None
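
# An artifact resume is requested on the command line as, e.g. (hypothetical run path):
#     python train.py --resume wandb-artifact://my-entity/my-project/abc123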


def process_wandb_config_ddp_mode(opt):
    with open(check_file(opt.data), errors='ignore') as f:
        data_dict = yaml.safe_load(f)  # data dict
    train_dir, val_dir = None, None
    if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX):
        api = wandb.Api()
        train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias)
        train_dir = train_artifact.download()
        train_path = Path(train_dir) / 'data/images/'
        data_dict['train'] = str(train_path)

    if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX):
        api = wandb.Api()
        val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias)
        val_dir = val_artifact.download()
        val_path = Path(val_dir) / 'data/images/'
        data_dict['val'] = str(val_path)
    if train_dir or val_dir:
        ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml')
        with open(ddp_data_path, 'w') as f:
            yaml.safe_dump(data_dict, f)
        opt.data = ddp_data_path
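
# Example (hypothetical values): a data.yaml whose entries point at W&B artifacts, which the
# function above resolves into local paths of the downloaded artifacts:
#   train: wandb-artifact://my-entity/my-project/train
#   val: wandb-artifact://my-entity/my-project/val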


class WandbLogger():
    """Log training runs, datasets, models, and predictions to Weights & Biases.

    This logger sends information to W&B at wandb.ai. By default, this information
    includes hyperparameters, system configuration and metrics, model metrics,
    and basic data metrics and analyses.

    By providing additional command line arguments to train.py, datasets,
    models and predictions can also be logged.

    For more on how this logger is used, see the Weights & Biases documentation:
    https://docs.wandb.com/guides/integrations/yolov5
    """

    def __init__(self, opt, run_id, job_type='Training'):
        """
        - Initialize WandbLogger instance
        - Upload dataset if opt.upload_dataset is True
        - Setup training processes if job_type is 'Training'

        arguments:
        opt (namespace) -- Commandline arguments for this run
        run_id (str) -- Run ID of W&B run to be resumed
        job_type (str) -- To set the job_type for this run
        """
        # Pre-training routine --
        self.job_type = job_type
        self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run
        self.val_artifact, self.train_artifact = None, None
        self.train_artifact_path, self.val_artifact_path = None, None
        self.result_artifact = None
        self.val_table, self.result_table = None, None
        self.bbox_media_panel_images = []
        self.val_table_path_map = None
        self.max_imgs_to_log = 16
        self.wandb_artifact_data_dict = None
        self.data_dict = None
        # It's more elegant to stick to one wandb.init call, but useful config data would be overwritten in WandbLogger's wandb.init call
        if isinstance(opt.resume, str):  # checks resume from artifact
            if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
                entity, project, run_id, model_artifact_name = get_run_info(opt.resume)
                model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name
                assert wandb, 'install wandb to resume wandb runs'
                # Resume wandb-artifact:// runs here (workaround to avoid overwriting wandb.config)
                self.wandb_run = wandb.init(id=run_id,
                                            project=project,
                                            entity=entity,
                                            resume='allow',
                                            allow_val_change=True)
                opt.resume = model_artifact_name
        elif self.wandb:
            self.wandb_run = wandb.init(config=opt,
                                        resume="allow",
                                        project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
                                        entity=opt.entity,
                                        name=opt.name if opt.name != 'exp' else None,
                                        job_type=job_type,
                                        id=run_id,
                                        allow_val_change=True) if not wandb.run else wandb.run
        if self.wandb_run:
            if self.job_type == 'Training':
                if not opt.resume:
                    if opt.upload_dataset:
                        self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt)

                    elif opt.data.endswith('_wandb.yaml'):  # When dataset is W&B artifact
                        with open(opt.data, errors='ignore') as f:
                            data_dict = yaml.safe_load(f)
                        self.data_dict = data_dict
                    else:  # Local .yaml dataset file or .zip file
                        self.data_dict = check_dataset(opt.data)
                else:
                    self.data_dict = check_dataset(opt.data)

                self.setup_training(opt)
                if not self.wandb_artifact_data_dict:
                    self.wandb_artifact_data_dict = self.data_dict
                # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming.
                if not opt.resume:
                    self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict},
                                                 allow_val_change=True)

            if self.job_type == 'Dataset Creation':
                self.data_dict = self.check_and_upload_dataset(opt)

    def check_and_upload_dataset(self, opt):
        """
        Check if the dataset format is compatible and upload it as a W&B artifact

        arguments:
        opt (namespace) -- Commandline arguments for current run

        returns:
        Updated dataset info dictionary where local dataset paths are replaced by WANDB_ARTIFACT_PREFIX links.
        """
        assert wandb, 'Install wandb to upload dataset'
        config_path = self.log_dataset_artifact(opt.data,
                                                opt.single_cls,
                                                'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem)
        print("Created dataset config file ", config_path)
        with open(config_path, errors='ignore') as f:
            wandb_data_dict = yaml.safe_load(f)
        return wandb_data_dict

    def setup_training(self, opt):
        """
        Setup the necessary processes for training YOLO models:
          - Attempt to download model checkpoint and dataset artifacts if opt.resume starts with WANDB_ARTIFACT_PREFIX
          - Update data_dict to contain info of previous run if resumed and the paths of dataset artifact if downloaded
          - Setup log_dict, initialize bbox_interval

        arguments:
        opt (namespace) -- commandline arguments for this run
        """
        self.log_dict, self.current_epoch = {}, 0
        self.bbox_interval = opt.bbox_interval
        if isinstance(opt.resume, str):
            modeldir, _ = self.download_model_artifact(opt)
            if modeldir:
                self.weights = Path(modeldir) / "last.pt"
                config = self.wandb_run.config
                opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = \
                    str(self.weights), config.save_period, config.batch_size, config.bbox_interval, \
                    config.epochs, config.hyp
            data_dict = dict(self.wandb_run.config.data_dict)  # eliminates the need for config file to resume
        else:
            data_dict = self.data_dict
        if self.val_artifact is None:  # If --upload_dataset is set, use the existing artifact, don't download
            self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'),
                                                                                           opt.artifact_alias)
            self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'),
                                                                                       opt.artifact_alias)

        if self.train_artifact_path is not None:
            train_path = Path(self.train_artifact_path) / 'data/images/'
            data_dict['train'] = str(train_path)
        if self.val_artifact_path is not None:
            val_path = Path(self.val_artifact_path) / 'data/images/'
            data_dict['val'] = str(val_path)

        if self.val_artifact is not None:
            self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
            self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"])
            self.val_table = self.val_artifact.get("val")
            if self.val_table_path_map is None:
                self.map_val_table_path()
        if opt.bbox_interval == -1:
            self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1
        train_from_artifact = self.train_artifact_path is not None and self.val_artifact_path is not None
        # Update the data_dict to point to local artifacts dir
        if train_from_artifact:
            self.data_dict = data_dict

    def download_dataset_artifact(self, path, alias):
        """
        download the dataset artifact if the path starts with WANDB_ARTIFACT_PREFIX

        arguments:
        path -- path of the dataset to be used for training
        alias (str) -- alias of the artifact to be downloaded/used for training

        returns:
        (str, wandb.Artifact) -- path of the downloaded dataset and its corresponding artifact object if dataset
        is found otherwise returns (None, None)
        """
        if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX):
            artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias)
            dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/"))
            assert dataset_artifact is not None, "Error: W&B dataset artifact doesn't exist"
            datadir = dataset_artifact.download()
            return datadir, dataset_artifact
        return None, None

    def download_model_artifact(self, opt):
        """
265
266
267
268
        download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX
        
        arguments:
        opt (namespace) -- Commandline arguments for this run
269
        """
        if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
            model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest")
            assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist'
            modeldir = model_artifact.download()
            epochs_trained = model_artifact.metadata.get('epochs_trained')
            total_epochs = model_artifact.metadata.get('total_epochs')
            is_finished = total_epochs is None
            assert not is_finished, 'training is finished, can only resume incomplete runs.'
            return modeldir, model_artifact
        return None, None

    def log_model(self, path, opt, epoch, fitness_score, best_model=False):
        """
        Log the model checkpoint as W&B artifact

        arguments:
        path (Path)   -- Path of directory containing the checkpoints
        opt (namespace) -- Command line arguments for this run
        epoch (int)  -- Current epoch number
        fitness_score (float) -- fitness score for current epoch
        best_model (boolean) -- Boolean representing if the current checkpoint is the best yet.
        """
        model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={
            'original_url': str(path),
            'epochs_trained': epoch + 1,
            'save period': opt.save_period,
            'project': opt.project,
            'total_epochs': opt.epochs,
            'fitness_score': fitness_score
        })
        model_artifact.add_file(str(path / 'last.pt'), name='last.pt')
        wandb.log_artifact(model_artifact,
                           aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else ''])
        print("Saving model artifact on epoch ", epoch + 1)

    def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
        """
        Log the dataset as W&B artifact and return the new data file with W&B links

        arguments:
        data_file (str) -- the .yaml file with information about the dataset like - path, classes etc.
        single_cls (boolean)  -- train multi-class data as single-class
        project (str) -- project name. Used to construct the artifact path
        overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new
        file with _wandb postfix. Eg -> data_wandb.yaml

        returns:
        the new .yaml file with artifact links. It can be used to start training directly from artifacts
        """
        self.data_dict = check_dataset(data_file)  # parse and check
        data = dict(self.data_dict)
        nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
        names = {k: v for k, v in enumerate(names)}  # to index dictionary
        self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(
            data['train'], rect=True, batch_size=1), names, name='train') if data.get('train') else None
        self.val_artifact = self.create_dataset_table(LoadImagesAndLabels(
            data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None
        if data.get('train'):
            data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train')
        if data.get('val'):
            data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val')
        path = Path(data_file).stem
        path = (path if overwrite_config else path + '_wandb') + '.yaml'  # updated data.yaml path
        data.pop('download', None)
        data.pop('path', None)
        with open(path, 'w') as f:
            yaml.safe_dump(data, f)

        if self.job_type == 'Training':  # builds correct artifact pipeline graph
            self.wandb_run.use_artifact(self.val_artifact)
            self.wandb_run.use_artifact(self.train_artifact)
            self.val_artifact.wait()
            self.val_table = self.val_artifact.get('val')
            self.map_val_table_path()
        else:
            self.wandb_run.log_artifact(self.train_artifact)
            self.wandb_run.log_artifact(self.val_artifact)
        return path
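
    # The returned file mirrors the input data.yaml but with artifact links, e.g. for
    # project 'YOLOv5' (illustrative snippet):
    #   train: wandb-artifact://YOLOv5/train
    #   val: wandb-artifact://YOLOv5/val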

    def map_val_table_path(self):
        """
        Map the validation dataset Table, i.e. name of file -> its id in the W&B Table.
        Useful for referencing artifacts for evaluation.
        """
        self.val_table_path_map = {}
        print("Mapping dataset")
        for i, data in enumerate(tqdm(self.val_table.data)):
            self.val_table_path_map[data[3]] = data[0]

    def create_dataset_table(self, dataset, class_to_id, name='dataset'):
        """
        Create and return W&B artifact containing W&B Table of the dataset.

        arguments:
        dataset (LoadImagesAndLabels) -- instance of LoadImagesAndLabels class used to iterate over the data to build Table
        class_to_id (dict(int, str)) -- hash map that maps class ids to labels
        name (str) -- name of the artifact

        returns:
        dataset artifact to be logged or used
        """
        # TODO: Explore multiprocessing to split this loop in parallel | This is essential for speeding up the logging
        artifact = wandb.Artifact(name=name, type="dataset")
        img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None
        img_files = tqdm(dataset.img_files) if not img_files else img_files
        for img_file in img_files:
            if Path(img_file).is_dir():
                artifact.add_dir(img_file, name='data/images')
                labels_path = 'labels'.join(dataset.path.rsplit('images', 1))
                artifact.add_dir(labels_path, name='data/labels')
            else:
                artifact.add_file(img_file, name='data/images/' + Path(img_file).name)
                label_file = Path(img2label_paths([img_file])[0])
                if label_file.exists():
                    artifact.add_file(str(label_file), name='data/labels/' + label_file.name)
        table = wandb.Table(columns=["id", "train_image", "Classes", "name"])
        class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()])
        for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)):
            box_data, img_classes = [], {}
            for cls, *xywh in labels[:, 1:].tolist():
                cls = int(cls)
                box_data.append({"position": {"middle": [xywh[0], xywh[1]], "width": xywh[2], "height": xywh[3]},
                                 "class_id": cls,
                                 "box_caption": "%s" % (class_to_id[cls])})
                img_classes[cls] = class_to_id[cls]
            boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}}  # inference-space
            table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()),
                           Path(paths).name)
        artifact.add(table, name)
        return artifact

    def log_training_progress(self, predn, path, names):
        """
        Build evaluation Table. Uses reference from validation dataset table.

        arguments:
        predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class]
        path (str): local path of the current evaluation image
        names (dict(int, str)): hash map that maps class ids to labels
        """
        class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()])
        box_data = []
        total_conf = 0
        for *xyxy, conf, cls in predn.tolist():
            if conf >= 0.25:
                box_data.append(
                    {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
                     "class_id": int(cls),
                     "box_caption": "%s %.3f" % (names[cls], conf),
                     "scores": {"class_score": conf},
                     "domain": "pixel"})
                total_conf = total_conf + conf
        boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
        id = self.val_table_path_map[Path(path).name]
        self.result_table.add_data(self.current_epoch,
                                   id,
                                   self.val_table.data[id][1],
                                   wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set),
                                   total_conf / max(1, len(box_data))
                                   )

    def val_one_image(self, pred, predn, path, names, im):
        """
        Log validation data for one image. Updates the result Table if the validation dataset is uploaded and logs to the bbox media panel.

        arguments:
        pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]
        predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class]
        path (str): local path of the current evaluation image
        """
        if self.val_table and self.result_table:  # Log Table if Val dataset is uploaded as artifact
            self.log_training_progress(predn, path, names)

        if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0:
            if self.current_epoch % self.bbox_interval == 0:
                box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
                             "class_id": int(cls),
                             "box_caption": "%s %.3f" % (names[cls], conf),
                             "scores": {"class_score": conf},
                             "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
                boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
                self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name))

    def log(self, log_dict):
        """
        save the metrics to the logging dictionary

        arguments:
        log_dict (Dict) -- metrics/media to be logged in current step
        """
        if self.wandb_run:
            for key, value in log_dict.items():
                self.log_dict[key] = value

    def end_epoch(self, best_result=False):
        """
        commit the log_dict, model artifacts and Tables to W&B and flush the log_dict.

        arguments:
        best_result (boolean): Boolean representing if the result of this evaluation is best or not
        """
        if self.wandb_run:
            with all_logging_disabled():
                if self.bbox_media_panel_images:
                    self.log_dict["Bounding Box Debugger/Images"] = self.bbox_media_panel_images
                wandb.log(self.log_dict)
                self.log_dict = {}
                self.bbox_media_panel_images = []
            if self.result_artifact:
                self.result_artifact.add(self.result_table, 'result')
                wandb.log_artifact(self.result_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch),
                                                                  ('best' if best_result else '')])

                wandb.log({"evaluation": self.result_table})
                self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"])
                self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")

    def finish_run(self):
        """
        Log metrics if any and finish the current W&B run
        """
        if self.wandb_run:
            if self.log_dict:
                with all_logging_disabled():
                    wandb.log(self.log_dict)
            wandb.run.finish()


@contextmanager
def all_logging_disabled(highest_level=logging.CRITICAL):
    """ source - https://gist.github.com/simon-weber/7853144
    A context manager that will prevent any logging messages triggered during the body from being processed.
    :param highest_level: the maximum logging level in use.
      This would only need to be changed if a custom level greater than CRITICAL is defined.
    """
    previous_level = logging.root.manager.disable
    logging.disable(highest_level)
    try:
        yield
    finally:
        logging.disable(previous_level)
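

# Usage sketch: silence library logging around W&B calls, e.g.
#     with all_logging_disabled():
#         wandb.log({'metric': 0.0})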