diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000..c8cdc95 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,4 @@ +# Format code with Black and isort +3c17a2337166245de8df778fe174aad997e14e8f +9cb6b20949c7c31ee21ed2b800e8b691f1be32a7 +53100f51e083cf4d900ed325ae0543cc754a26cc diff --git a/.gitignore b/.gitignore index 4139d6b..c6206b0 100644 --- a/.gitignore +++ b/.gitignore @@ -1,10 +1,108 @@ -*.pyc +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# IPython +profile_default/ +ipython_config.py + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Rope project settings +.ropeproject/ + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Model artifacts *.png *.jpg *.wav *.tar *.json -*.ipynb_checkpoints/ -experiments/* plots/* +# Batdetect Models [Include] +!bat_detect/models/*.pth.tar + +# Model experiments +experiments/* + +# Jupyter notebooks +.virtual_documents +.ipynb_checkpoints +*.ipynb +!batdetect2_notebook.ipynb diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 0000000..b20ceed --- /dev/null +++ b/.pylintrc @@ -0,0 +1,5 @@ +[TYPECHECK] + +# List of members which are set dynamically and missed by Pylint inference # system, and so shouldn't trigger E1101 when accessed. +generated-members=torch.* diff --git a/README.md b/README.md index e24bf79..f62a078 100644 --- a/README.md +++ b/README.md @@ -1,61 +1,122 @@ # BatDetect2 - + Code for detecting and classifying bat echolocation calls in high frequency audio recordings. -Code for detecting and classifying bat echolocation calls in high frequency audio recordings. +## Getting started +### Python Environment + +We recommend using an isolated Python environment to avoid dependency issues. Choose one +of the following options: + +* Install the Anaconda Python 3.10 distribution for your operating system from [here](https://www.continuum.io/downloads).
Create a new environment and activate it: + +```bash +conda create -y --name batdetect2 python==3.10 +conda activate batdetect2 +``` + +* If you already have Python installed (version >= 3.8, < 3.11) and prefer using virtual environments, then: + +```bash +python -m venv .venv +source .venv/bin/activate +``` + +### Installing BatDetect2 +You can use pip to install `batdetect2`: + +```bash +pip install batdetect2 +``` + +Alternatively, download this code from the repository (by clicking on the green button on the top right) and unzip it. +Once unzipped, run this from the extracted folder. + +```bash +pip install . +``` + +Make sure you have the environment activated before installing `batdetect2`. -### Getting started -1) Install the Anaconda Python 3.10 distribution for your operating system from [here](https://www.continuum.io/downloads). -2) Download this code from the repository (by clicking on the green button on top right) and unzip it. -3) Create a new environment and install the required packages: -`conda env create -f environment.yml` -`conda activate batdetect2` +## Try the model +1) You can try a demo of the model (for UK species) on [huggingface](https://huggingface.co/spaces/macaodha/batdetect2). + +2) Alternatively, click [here](https://colab.research.google.com/github/macaodha/batdetect2/blob/master/batdetect2_notebook.ipynb) to run the model using Google Colab. You can also run this notebook locally. -### Try the model -1) You can try a demo of the model (for UK species) on [huggingface](https://huggingface.co/spaces/macaodha/batdetect2). +## Running the model on your own data -2) Alternatively, click [here](https://colab.research.google.com/github/macaodha/batdetect2/blob/master/batdetect2_notebook.ipynb) to run the model using Google Colab. You can also run this notebook locally. +After following the above steps to install the code, you can run the model on your own data. -### Running the model on your own data -After following the above steps to install the code you can run the model on your own data by opening the command line where the code is located and typing: -`python run_batdetect.py AUDIO_DIR ANN_DIR DETECTION_THRESHOLD` -e.g. -`python run_batdetect.py example_data/audio/ example_data/anns/ 0.3` +### Using the command line + +You can run the model by opening the command line and typing: +```bash +batdetect2 detect AUDIO_DIR ANN_DIR DETECTION_THRESHOLD +``` +e.g. +```bash +batdetect2 detect example_data/audio/ example_data/anns/ 0.3 +``` + +`AUDIO_DIR` is the path on your computer to the audio wav files of interest. +`ANN_DIR` is the path on your computer where the model predictions will be saved. The model will output both `.csv` and `.json` results for each audio file. +`DETECTION_THRESHOLD` is a number between 0 and 1 specifying the cut-off threshold applied to the calls. A smaller number will result in more calls detected, but with the chance of introducing more mistakes. + +There are also optional arguments, e.g. you can request that the model outputs features (i.e. estimated call parameters) such as duration, max_frequency, etc. by setting the flag `--spec_features`. These will be saved as `*_spec_features.csv` files: +`batdetect2 detect example_data/audio/ example_data/anns/ 0.3 --spec_features` + +You can also specify which model to use by setting the `--model_path` argument. If not specified, it will default to using a model trained on UK data e.g.
+`batdetect2 detect example_data/audio/ example_data/anns/ 0.3 --model_path models/Net2DFast_UK_same.pth.tar` -`AUDIO_DIR` is the path on your computer to the audio wav files of interest. -`ANN_DIR` is the path on your computer where the model predictions will be saved. The model will output both `.csv` and `.json` results for each audio file. -`DETECTION_THRESHOLD` is a number between 0 and 1 specifying the cut-off threshold applied to the calls. A smaller number will result in more calls detected, but with the chance of introducing more mistakes. +### Using the Python API -There are also optional arguments, e.g. you can request that the model outputs features (i.e. estimated call parameters) such as duration, max_frequency, etc. by setting the flag `--spec_features`. These will be saved as `*_spec_features.csv` files: -`python run_batdetect.py example_data/audio/ example_data/anns/ 0.3 --spec_features` +If you prefer to process your data within a Python script, then you can use the `batdetect2` Python API. -You can also specify which model to use by setting the `--model_path` argument. If not specified, it will default to using a model trained on UK data e.g. -`python run_batdetect.py example_data/audio/ example_data/anns/ 0.3 --model_path models/Net2DFast_UK_same.pth.tar` +```python +from batdetect2 import api + +AUDIO_FILE = "example_data/audio/20170701_213954-MYOMYS-LR_0_0.5.wav" + +# Process a whole file +results = api.process_file(AUDIO_FILE) + +# Or, load audio and compute spectrograms +audio = api.load_audio(AUDIO_FILE) +spec = api.generate_spectrogram(audio) + +# And process the audio or the spectrogram with the model +detections, features, spec = api.process_audio(audio) +detections, features = api.process_spectrogram(spec) + +# Do something else ... +``` + +You can integrate the detections or the extracted features into your custom analysis pipeline. -### Training the model on your own data -Take a look at the steps outlined in fintuning readme [here](bat_detect/finetune/readme.md) for a description of how to train your own model. +## Training the model on your own data +Take a look at the steps outlined in the finetuning readme [here](bat_detect/finetune/readme.md) for a description of how to train your own model. -### Data and annotations -The raw audio data and annotations used to train the models in the paper will be added soon. -The audio interface used to annotate audio data for training and evaluation is available [here](https://github.com/macaodha/batdetect2_GUI). +## Data and annotations +The raw audio data and annotations used to train the models in the paper will be added soon. +The audio interface used to annotate audio data for training and evaluation is available [here](https://github.com/macaodha/batdetect2_GUI). -### Warning +## Warning The models developed and shared as part of this repository should be used with caution. While they have been evaluated on held out audio data, great care should be taken when using the model outputs for any form of biodiversity assessment. Your data may differ, and as a result it is very strongly recommended that you validate the model first using data with known species to ensure that the outputs can be trusted. -### FAQ -For more information please consult our [FAQ](faq.md). +## FAQ +For more information please consult our [FAQ](faq.md).
-### Reference +## Reference If you find our work useful in your research please consider citing our paper which you can find [here](https://www.biorxiv.org/content/10.1101/2022.12.14.520490v1): ``` @article{batdetect2_2022, @@ -66,8 +127,8 @@ If you find our work useful in your research please consider citing our paper wh } ``` -### Acknowledgements -Thanks to all the contributors who spent time collecting and annotating audio data. +## Acknowledgements +Thanks to all the contributors who spent time collecting and annotating audio data. ### TODOs diff --git a/app.py b/app.py index ae44690..1b820b2 100644 --- a/app.py +++ b/app.py @@ -1,84 +1,126 @@ import gradio as gr -import os import matplotlib.pyplot as plt -import pandas as pd import numpy as np +import pandas as pd -import bat_detect.utils.detector_utils as du import bat_detect.utils.audio_utils as au +import bat_detect.utils.detector_utils as du import bat_detect.utils.plot_utils as viz - # setup the arguments args = {} -args = du.get_default_bd_args() -args['detection_threshold'] = 0.3 -args['time_expansion_factor'] = 1 -args['model_path'] = 'models/Net2DFast_UK_same.pth.tar' +args = du.get_default_run_config() +args["detection_threshold"] = 0.3 +args["time_expansion_factor"] = 1 +args["model_path"] = "models/Net2DFast_UK_same.pth.tar" max_duration = 2.0 # load the model -model, params = du.load_model(args['model_path']) +model, params = du.load_model(args["model_path"]) -df = gr.Dataframe( - headers=["species", "time", "detection_prob", "species_prob"], - datatype=["str", "str", "str", "str"], - row_count=1, - col_count=(4, "fixed"), - label='Predictions' - ) - -examples = [['example_data/audio/20170701_213954-MYOMYS-LR_0_0.5.wav', 0.3], - ['example_data/audio/20180530_213516-EPTSER-LR_0_0.5.wav', 0.3], - ['example_data/audio/20180627_215323-RHIFER-LR_0_0.5.wav', 0.3]] +df = gr.Dataframe( + headers=["species", "time", "detection_prob", "species_prob"], + datatype=["str", "str", "str", "str"], + row_count=1, + col_count=(4, "fixed"), + label="Predictions", +) + +examples = [ + ["example_data/audio/20170701_213954-MYOMYS-LR_0_0.5.wav", 0.3], + ["example_data/audio/20180530_213516-EPTSER-LR_0_0.5.wav", 0.3], + ["example_data/audio/20180627_215323-RHIFER-LR_0_0.5.wav", 0.3], +] def make_prediction(file_name=None, detection_threshold=0.3): - if file_name is not None: audio_file = file_name else: return "You must provide an input audio file." 
- - if detection_threshold is not None and detection_threshold != '': - args['detection_threshold'] = float(detection_threshold) - + + if detection_threshold is not None and detection_threshold != "": + args["detection_threshold"] = float(detection_threshold) + + run_config = { + **params, + **args, + "max_duration": max_duration, + } + # process the file to generate predictions - results = du.process_file(audio_file, model, params, args, max_duration=max_duration) - - anns = [ann for ann in results['pred_dict']['annotation']] - clss = [aa['class'] for aa in anns] - st_time = [aa['start_time'] for aa in anns] - cls_prob = [aa['class_prob'] for aa in anns] - det_prob = [aa['det_prob'] for aa in anns] - data = {'species': clss, 'time': st_time, 'detection_prob': det_prob, 'species_prob': cls_prob} - + results = du.process_file( + audio_file, + model, + run_config, + ) + + anns = [ann for ann in results["pred_dict"]["annotation"]] + clss = [aa["class"] for aa in anns] + st_time = [aa["start_time"] for aa in anns] + cls_prob = [aa["class_prob"] for aa in anns] + det_prob = [aa["det_prob"] for aa in anns] + data = { + "species": clss, + "time": st_time, + "detection_prob": det_prob, + "species_prob": cls_prob, + } + df = pd.DataFrame(data=data) im = generate_results_image(audio_file, anns) - + return [df, im] -def generate_results_image(audio_file, anns): - +def generate_results_image(audio_file, anns): + # load audio - sampling_rate, audio = au.load_audio_file(audio_file, args['time_expansion_factor'], - params['target_samp_rate'], params['scale_raw_audio'], max_duration=max_duration) + sampling_rate, audio = au.load_audio( + audio_file, + args["time_expansion_factor"], + params["target_samp_rate"], + params["scale_raw_audio"], + max_duration=max_duration, + ) duration = audio.shape[0] / sampling_rate - + # generate spec - spec, spec_viz = au.generate_spectrogram(audio, sampling_rate, params, True, False) + spec, spec_viz = au.generate_spectrogram( + audio, sampling_rate, params, True, False + ) # create fig - plt.close('all') - fig = plt.figure(1, figsize=(spec.shape[1]/100, spec.shape[0]/100), dpi=100, frameon=False) - spec_duration = au.x_coords_to_time(spec.shape[1], sampling_rate, params['fft_win_length'], params['fft_overlap']) - viz.create_box_image(spec, fig, anns, 0, spec_duration, spec_duration, params, spec.max()*1.1, False, True) - plt.ylabel('Freq - kHz') - plt.xlabel('Time - secs') + plt.close("all") + fig = plt.figure( + 1, + figsize=(spec.shape[1] / 100, spec.shape[0] / 100), + dpi=100, + frameon=False, + ) + spec_duration = au.x_coords_to_time( + spec.shape[1], + sampling_rate, + params["fft_win_length"], + params["fft_overlap"], + ) + viz.create_box_image( + spec, + fig, + anns, + 0, + spec_duration, + spec_duration, + params, + spec.max() * 1.1, + False, + True, + ) + plt.ylabel("Freq - kHz") + plt.xlabel("Time - secs") plt.tight_layout() - + # convert fig to image fig.canvas.draw() data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8) @@ -88,21 +130,23 @@ def generate_results_image(audio_file, anns): return im -descr_txt = "Demo of BatDetect2 deep learning-based bat echolocation call detection. " \ - "
</br>This model is only trained on bat species from the UK. If the input " \ "file is longer than 2 seconds, only the first 2 seconds will be processed." \ "</br>
Check out the paper [here](https://www.biorxiv.org/content/10.1101/2022.12.14.520490v1)." +descr_txt = ( + "Demo of BatDetect2 deep learning-based bat echolocation call detection. " + "</br>
This model is only trained on bat species from the UK. If the input " + "file is longer than 2 seconds, only the first 2 seconds will be processed." + "</br>
Check out the paper [here](https://www.biorxiv.org/content/10.1101/2022.12.14.520490v1)." +) gr.Interface( - fn = make_prediction, - inputs = [gr.Audio(source="upload", type="filepath", optional=True), - gr.Dropdown([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])], - outputs = [df, gr.Image(label="Visualisation")], - theme = "huggingface", - title = "BatDetect2 Demo", - description = descr_txt, - examples = examples, - allow_flagging = 'never', + fn=make_prediction, + inputs=[ + gr.Audio(source="upload", type="filepath", optional=True), + gr.Dropdown([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]), + ], + outputs=[df, gr.Image(label="Visualisation")], + theme="huggingface", + title="BatDetect2 Demo", + description=descr_txt, + examples=examples, + allow_flagging="never", ).launch() - - diff --git a/bat_detect/api.py b/bat_detect/api.py new file mode 100644 index 0000000..df9c987 --- /dev/null +++ b/bat_detect/api.py @@ -0,0 +1,397 @@ +"""Python API for bat_detect. + +This module provides a Python API for bat_detect. It can be used to +process audio files or spectrograms with the default model or a custom +model. + +Example +------- +You can use the default model to process audio files. To process a single +file, use the `process_file` function. +>>> import bat_detect.api as api +>>> # Process audio file +>>> results = api.process_file("audio_file.wav") + +To process multiple files, use the `list_audio_files` function to get a list +of audio files in a directory. Then use the `process_file` function to +process each file. + +>>> import bat_detect.api as api +>>> # Get list of audio files +>>> audio_files = api.list_audio_files("audio_directory") +>>> # Process audio files +>>> results = [api.process_file(f) for f in audio_files] + +If the recording is longer than 3 seconds, the `process_file` function will +slice it into 3 second chunks and process each chunk separately. The +results will be combined into a dictionary with the following keys: + + - `pred_dict`: All the predictions from the model in the format + expected by the annotation tool. + - `cnn_feats`: Optional. A list of `numpy` arrays containing the CNN features + for each detection. The CNN features are the output of the CNN before + the final classification layer. You can use these features to train + your own classifier, or to do other processing on the detections. + They are in the same order as the detections in + `results['pred_dict']['annotation']`. Will only be returned if the + `cnn_feats` parameter in the config is set to `True`. + - `spec_slices`: Optional. A list of `numpy` arrays containing the spectrogram + for each of the processed chunks. Will only be returned if the + `spec_slices` parameter in the config is set to `True`. + +Alternatively, you can use the `process_audio` function to process an audio +array directly, or `process_spectrogram` to process spectrograms. This +allows you to do other preprocessing steps before running the model for +predictions. + +>>> import bat_detect.api as api +>>> # Load audio +>>> audio = api.load_audio("audio_file.wav") +>>> # Process the audio array +>>> detections, features, spec = api.process_audio(audio) +>>> # Or compute and process the spectrogram +>>> spec = api.generate_spectrogram(audio) +>>> detections, features = api.process_spectrogram(spec) + +Here `detections` is the list of detected calls, `features` is the list of +CNN features for each detection, and `spec` is the spectrogram of the +processed audio.
Each detection is a dictionary similar to the following: + + { + 'start_time': 0.0, + 'end_time': 0.1, + 'low_freq': 10000, + 'high_freq': 20000, + 'class': 'Myotis myotis', + 'class_prob': 0.9, + 'det_prob': 0.9, + 'individual': 0, + 'event': 'Echolocation' + } + +If you wish to interact directly with the model, you can use the `model` +attribute to get the default model. + +>>> import bat_detect.api as api +>>> # Get the default model +>>> model = api.model +>>> # Process the spectrogram +>>> outputs = model(spec) + +However, you will need to do the postprocessing yourself. The +model outputs are a collection of raw tensors. The `postprocess` +function can be used to convert the model outputs into a list of +detections and a list of CNN features. + +>>> import bat_detect.api as api +>>> # Get the default model +>>> model = api.model +>>> # Process the spectrogram +>>> outputs = model(spec) +>>> # Postprocess the outputs +>>> detections, features = api.postprocess(outputs) + +If you wish to use a custom model or change the default parameters, please +consult the API documentation in the code. + +""" +import warnings +from typing import List, Optional, Tuple + +import numpy as np +import torch + +import bat_detect.utils.audio_utils as au +import bat_detect.utils.detector_utils as du +from bat_detect.detector.parameters import ( + DEFAULT_MODEL_PATH, + DEFAULT_PROCESSING_CONFIGURATIONS, + DEFAULT_SPECTROGRAM_PARAMETERS, + TARGET_SAMPLERATE_HZ, +) +from bat_detect.types import ( + Annotation, + DetectionModel, + ModelOutput, + ProcessingConfiguration, + SpectrogramParameters, +) +from bat_detect.utils.detector_utils import list_audio_files, load_model + +# Remove warnings from torch +warnings.filterwarnings("ignore", category=UserWarning, module="torch") + +__all__ = [ + "config", + "generate_spectrogram", + "get_config", + "list_audio_files", + "load_audio", + "load_model", + "model", + "postprocess", + "process_audio", + "process_file", + "process_spectrogram", +] + + +# Use GPU if available +DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + +# Default model +MODEL, PARAMS = load_model(DEFAULT_MODEL_PATH, device=DEVICE) + + +def get_config(**kwargs) -> ProcessingConfiguration: + """Get default processing configuration. + + Can be used to override default parameters by passing keyword arguments. + """ + return {**DEFAULT_PROCESSING_CONFIGURATIONS, **kwargs} # type: ignore + + +# Default processing configuration +CONFIG = get_config(**PARAMS) + + +def load_audio( + path: str, + time_exp_fact: float = 1, + target_samp_rate: int = TARGET_SAMPLERATE_HZ, + scale: bool = False, + max_duration: Optional[float] = None, +) -> np.ndarray: + """Load audio from file. + + All audio will be resampled to the target sample rate. If the audio is + longer than max_duration, it will be truncated to max_duration. + + Parameters + ---------- + path : str + Path to audio file. + time_exp_fact : float, optional + Time expansion factor, by default 1 + target_samp_rate : int, optional + Target sample rate, by default 256000 + scale : bool, optional + Scale audio to [-1, 1], by default False + max_duration : float, optional + Maximum duration of audio in seconds, by default None + + Returns + ------- + np.ndarray + Audio data.
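+ + Example + ------- + Load a recording, truncating anything beyond the first 3 seconds (illustrative file name): + + >>> audio = load_audio("audio_file.wav", max_duration=3.0)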
+ """ + _, audio = au.load_audio( + path, + time_exp_fact, + target_samp_rate, + scale, + max_duration, + ) + return audio + + +def generate_spectrogram( + audio: np.ndarray, + samp_rate: int = TARGET_SAMPLERATE_HZ, + config: Optional[SpectrogramParameters] = None, + device: torch.device = DEVICE, +) -> torch.Tensor: + """Generate spectrogram from audio array. + + Parameters + ---------- + audio : np.ndarray + Audio data. + samp_rate : int, optional + Sample rate. Defaults to 256000 which is the target sample rate of + the default model. Only change if you loaded the audio with a + different sample rate. + config : SpectrogramParameters, optional + Spectrogram parameters, by default None (uses default parameters). + + Returns + ------- + torch.Tensor + Spectrogram. + """ + if config is None: + config = DEFAULT_SPECTROGRAM_PARAMETERS + + _, spec, _ = du.compute_spectrogram( + audio, + samp_rate, + config, + return_np=False, + device=device, + ) + + return spec + + +def process_file( + audio_file: str, + model: DetectionModel = MODEL, + config: Optional[ProcessingConfiguration] = None, + device: torch.device = DEVICE, +) -> du.RunResults: + """Process audio file with model. + + Parameters + ---------- + audio_file : str + Path to audio file. + model : DetectionModel, optional + Detection model. Uses default model if not specified. + config : Optional[ProcessingConfiguration], optional + Processing configuration, by default None (uses default parameters). + device : torch.device, optional + Device to use, by default tries to use GPU if available. + """ + if config is None: + config = CONFIG + + return du.process_file( + audio_file, + model, + config, + device, + ) + + +def process_spectrogram( + spec: torch.Tensor, + samp_rate: int = TARGET_SAMPLERATE_HZ, + model: DetectionModel = MODEL, + config: Optional[ProcessingConfiguration] = None, +) -> Tuple[List[Annotation], List[np.ndarray]]: + """Process spectrogram with model. + + Parameters + ---------- + spec : torch.Tensor + Spectrogram. + samp_rate : int, optional + Sample rate of the audio from which the spectrogram was generated. + Defaults to 256000 which is the target sample rate of the default + model. Only change if you generated the spectrogram with a different + sample rate. + model : DetectionModel, optional + Detection model. Uses default model if not specified. + config : Optional[ProcessingConfiguration], optional + Processing configuration, by default None (uses default parameters). + + Returns + ------- + DetectionResult + """ + if config is None: + config = CONFIG + + return du.process_spectrogram( + spec, + samp_rate, + model, + config, + ) + + +def process_audio( + audio: np.ndarray, + samp_rate: int = TARGET_SAMPLERATE_HZ, + model: DetectionModel = MODEL, + config: Optional[ProcessingConfiguration] = None, + device: torch.device = DEVICE, +) -> Tuple[List[Annotation], List[np.ndarray], torch.Tensor]: + """Process audio array with model. + + Parameters + ---------- + audio : np.ndarray + Audio data. + samp_rate : int, optional + Sample rate, by default 256000. Only change if you loaded the audio + with a different sample rate. + model : DetectionModel, optional + Detection model. Uses default model if not specified. + config : Optional[ProcessingConfiguration], optional + Processing configuration, by default None (uses default parameters). + device : torch.device, optional + Device to use, by default tries to use GPU if available. + + Returns + ------- + annotations : List[Annotation] + List of predicted annotations. 
+ + features : List[np.ndarray] + List of extracted features for each annotation. + + spec : torch.Tensor + Spectrogram of the audio used for prediction. + """ + if config is None: + config = CONFIG + + return du.process_audio_array( + audio, + samp_rate, + model, + config, + device, + ) + + +def postprocess( + outputs: ModelOutput, + samp_rate: int = TARGET_SAMPLERATE_HZ, + config: Optional[ProcessingConfiguration] = None, +) -> Tuple[List[Annotation], np.ndarray]: + """Postprocess model outputs. + + Convert model tensor outputs to predicted bounding boxes and + extracted features. + + Will run non-maximum suppression and remove overlapping annotations. + + Parameters + ---------- + outputs : ModelOutput + Model raw outputs. + samp_rate : int, optional + Sample rate of the audio from which the spectrogram was generated. + Defaults to 256000 which is the target sample rate of the default + model. Only change if you generated outputs from a spectrogram with a + different sample rate. + config : Optional[ProcessingConfiguration], optional + Processing configuration, by default None (uses default parameters). + + Returns + ------- + annotations : List[Annotation] + List of predicted annotations. + features : np.ndarray + An array of extracted features for each annotation. The shape of the + array is (n_annotations, n_features). + """ + if config is None: + config = CONFIG + + return du.postprocess_model_outputs( + outputs, + samp_rate, + config, + ) + + +model: DetectionModel = MODEL +"""Base detection model.""" + +config: ProcessingConfiguration = CONFIG +"""Default processing configuration.""" diff --git a/bat_detect/cli.py b/bat_detect/cli.py new file mode 100644 index 0000000..29f4142 --- /dev/null +++ b/bat_detect/cli.py @@ -0,0 +1,137 @@ +"""BatDetect2 command line interface.""" +import os + +import click + +from bat_detect import api +from bat_detect.detector.parameters import DEFAULT_MODEL_PATH +from bat_detect.utils.detector_utils import save_results_to_file + +CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +INFO_STR = """ +BatDetect2 - Detection and Classification + Assumes audio files are mono, not stereo. + Spaces in the input paths will throw an error. Wrap in quotes. + Input files should be short in duration e.g. < 30 seconds. +""" + + +@click.group() +def cli(): + """BatDetect2 - Bat Call Detection and Classification.""" + click.echo(INFO_STR) + + +@cli.command() +@click.argument( + "audio_dir", + type=click.Path(exists=True), +) +@click.argument( + "ann_dir", + type=click.Path(exists=False), +) +@click.argument( + "detection_threshold", + type=float, +) +@click.option( + "--cnn_features", + is_flag=True, + default=False, + help="Extracts CNN call features", +) +@click.option( + "--spec_features", + is_flag=True, + default=False, + help="Extracts low level call features", +) +@click.option( + "--time_expansion_factor", + type=int, + default=1, + help="The time expansion factor used for all files (default is 1)", +) +@click.option( + "--quiet", + is_flag=True, + default=False, + help="Minimize output printing", +) +@click.option( + "--save_preds_if_empty", + is_flag=True, + default=False, + help="Save empty annotation file if no detections made.", +) +@click.option( + "--model_path", + type=str, + default=DEFAULT_MODEL_PATH, + help="Path to trained BatDetect2 model", +) +def detect( + audio_dir: str, + ann_dir: str, + detection_threshold: float, + **args, +): + """Detect bat calls in files in AUDIO_DIR and save predictions to ANN_DIR.
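+ + For example: batdetect2 detect example_data/audio/ example_data/anns/ 0.3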
+ + DETECTION_THRESHOLD is the detection threshold. All predictions with a + score below this threshold will be discarded. Values between 0 and 1. + + Assumes audio files are mono, not stereo. + + Spaces in the input paths will throw an error. Wrap in quotes. + + Input files should be short in duration e.g. < 30 seconds. + """ + click.echo(f"Loading model: {args['model_path']}") + model, params = api.load_model(args["model_path"]) + + click.echo(f"\nInput directory: {audio_dir}") + files = api.list_audio_files(audio_dir) + + click.echo(f"Number of audio files: {len(files)}") + click.echo(f"\nSaving results to: {ann_dir}") + + config = api.get_config( + **{ + **params, + **args, + "spec_slices": False, + "chunk_size": 2, + "detection_threshold": detection_threshold, + } + ) + + # process files + error_files = [] + for audio_file in files: + try: + results = api.process_file(audio_file, model, config=config) + + if args["save_preds_if_empty"] or ( + len(results["pred_dict"]["annotation"]) > 0 + ): + results_path = audio_file.replace(audio_dir, ann_dir) + save_results_to_file(results, results_path) + except (RuntimeError, ValueError, LookupError) as err: + error_files.append(audio_file) + click.secho(f"Error processing file!: {err}", fg="red") + raise err + + click.echo(f"\nResults saved to: {ann_dir}") + + if len(error_files) > 0: + click.secho("\nUnable to process the following files:", fg="red") + for err in error_files: + click.echo(f" {err}") + + +if __name__ == "__main__": + cli() diff --git a/bat_detect/detector/compute_features.py b/bat_detect/detector/compute_features.py index b6f4b8b..368c2db 100644 --- a/bat_detect/detector/compute_features.py +++ b/bat_detect/detector/compute_features.py @@ -2,8 +2,10 @@ import numpy as np def convert_int_to_freq(spec_ind, spec_height, min_freq, max_freq): - spec_ind = spec_height-spec_ind - return round((spec_ind / float(spec_height)) * (max_freq - min_freq) + min_freq, 2) + spec_ind = spec_height - spec_ind + return round( + (spec_ind / float(spec_height)) * (max_freq - min_freq) + min_freq, 2 + ) def extract_spec_slices(spec, pred_nms, params): @@ -11,28 +13,40 @@ Extracts spectrogram slices from spectrogram based on detected call locations.
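+ Each slice is padded by 20% of the call width on either side of the detection.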
""" - x_pos = pred_nms['x_pos'] - y_pos = pred_nms['y_pos'] - bb_width = pred_nms['bb_width'] - bb_height = pred_nms['bb_height'] - slices = [] + x_pos = pred_nms["x_pos"] + y_pos = pred_nms["y_pos"] + bb_width = pred_nms["bb_width"] + bb_height = pred_nms["bb_height"] + slices = [] # add 20% padding either side of call - pad = bb_width*0.2 - x_pos_pad = x_pos - pad - bb_width_pad = bb_width + 2*pad + pad = bb_width * 0.2 + x_pos_pad = x_pos - pad + bb_width_pad = bb_width + 2 * pad - for ff in range(len(pred_nms['det_probs'])): + for ff in range(len(pred_nms["det_probs"])): x_start = int(np.maximum(0, x_pos_pad[ff])) - x_end = int(np.minimum(spec.shape[1]-1, np.round(x_pos_pad[ff] + bb_width_pad[ff]))) + x_end = int( + np.minimum( + spec.shape[1] - 1, np.round(x_pos_pad[ff] + bb_width_pad[ff]) + ) + ) slices.append(spec[:, x_start:x_end].astype(np.float16)) return slices def get_feature_names(): - feature_names = ['duration', 'low_freq_bb', 'high_freq_bb', 'bandwidth', - 'max_power_bb', 'max_power', 'max_power_first', - 'max_power_second', 'call_interval'] + feature_names = [ + "duration", + "low_freq_bb", + "high_freq_bb", + "bandwidth", + "max_power_bb", + "max_power", + "max_power_first", + "max_power_second", + "call_interval", + ] return feature_names @@ -45,40 +59,76 @@ def get_feats(spec, pred_nms, params): https://github.com/YvesBas/Tadarida-D/blob/master/Manual_Tadarida-D.odt """ - x_pos = pred_nms['x_pos'] - y_pos = pred_nms['y_pos'] - bb_width = pred_nms['bb_width'] - bb_height = pred_nms['bb_height'] + x_pos = pred_nms["x_pos"] + y_pos = pred_nms["y_pos"] + bb_width = pred_nms["bb_width"] + bb_height = pred_nms["bb_height"] - feature_names = get_feature_names() - num_detections = len(pred_nms['det_probs']) - features = np.ones((num_detections, len(feature_names)), dtype=np.float32)*-1 + feature_names = get_feature_names() + num_detections = len(pred_nms["det_probs"]) + features = ( + np.ones((num_detections, len(feature_names)), dtype=np.float32) * -1 + ) for ff in range(num_detections): x_start = int(np.maximum(0, x_pos[ff])) - x_end = int(np.minimum(spec.shape[1]-1, np.round(x_pos[ff] + bb_width[ff]))) + x_end = int( + np.minimum(spec.shape[1] - 1, np.round(x_pos[ff] + bb_width[ff])) + ) # y low is the lowest freq but it will have a higher value due to array starting at 0 at top - y_low = int(np.minimum(spec.shape[0]-1, y_pos[ff])) - y_high = int(np.maximum(0, np.round(y_pos[ff] - bb_height[ff]))) + y_low = int(np.minimum(spec.shape[0] - 1, y_pos[ff])) + y_high = int(np.maximum(0, np.round(y_pos[ff] - bb_height[ff]))) spec_slice = spec[:, x_start:x_end] if spec_slice.shape[1] > 1: - features[ff, 0] = round(pred_nms['end_times'][ff] - pred_nms['start_times'][ff], 5) - features[ff, 1] = int(pred_nms['low_freqs'][ff]) - features[ff, 2] = int(pred_nms['high_freqs'][ff]) - features[ff, 3] = int(pred_nms['high_freqs'][ff] - pred_nms['low_freqs'][ff]) - features[ff, 4] = int(convert_int_to_freq(y_high+spec_slice[y_high:y_low, :].sum(1).argmax(), - spec.shape[0], params['min_freq'], params['max_freq'])) - features[ff, 5] = int(convert_int_to_freq(spec_slice.sum(1).argmax(), - spec.shape[0], params['min_freq'], params['max_freq'])) - hlf_val = spec_slice.shape[1]//2 + features[ff, 0] = round( + pred_nms["end_times"][ff] - pred_nms["start_times"][ff], 5 + ) + features[ff, 1] = int(pred_nms["low_freqs"][ff]) + features[ff, 2] = int(pred_nms["high_freqs"][ff]) + features[ff, 3] = int( + pred_nms["high_freqs"][ff] - pred_nms["low_freqs"][ff] + ) + features[ff, 4] = int( + 
convert_int_to_freq( + y_high + spec_slice[y_high:y_low, :].sum(1).argmax(), + spec.shape[0], + params["min_freq"], + params["max_freq"], + ) + ) + features[ff, 5] = int( + convert_int_to_freq( + spec_slice.sum(1).argmax(), + spec.shape[0], + params["min_freq"], + params["max_freq"], + ) + ) + hlf_val = spec_slice.shape[1] // 2 - features[ff, 6] = int(convert_int_to_freq(spec_slice[:, :hlf_val].sum(1).argmax(), - spec.shape[0], params['min_freq'], params['max_freq'])) - features[ff, 7] = int(convert_int_to_freq(spec_slice[:, hlf_val:].sum(1).argmax(), - spec.shape[0], params['min_freq'], params['max_freq'])) + features[ff, 6] = int( + convert_int_to_freq( + spec_slice[:, :hlf_val].sum(1).argmax(), + spec.shape[0], + params["min_freq"], + params["max_freq"], + ) + ) + features[ff, 7] = int( + convert_int_to_freq( + spec_slice[:, hlf_val:].sum(1).argmax(), + spec.shape[0], + params["min_freq"], + params["max_freq"], + ) + ) if ff > 0: - features[ff, 8] = round(pred_nms['start_times'][ff] - pred_nms['start_times'][ff-1], 5) + features[ff, 8] = round( + pred_nms["start_times"][ff] + - pred_nms["start_times"][ff - 1], + 5, + ) return features diff --git a/bat_detect/detector/model_helpers.py b/bat_detect/detector/model_helpers.py index c91ef04..789bdb6 100644 --- a/bat_detect/detector/model_helpers.py +++ b/bat_detect/detector/model_helpers.py @@ -1,8 +1,14 @@ -import torch.nn as nn import torch import torch.nn.functional as F -import numpy as np -import math +from torch import nn + +__all__ = [ + "SelfAttention", + "ConvBlockDownCoordF", + "ConvBlockDownStandard", + "ConvBlockUpF", + "ConvBlockUpStandard", +] class SelfAttention(nn.Module): @@ -10,38 +16,61 @@ class SelfAttention(nn.Module): super(SelfAttention, self).__init__() # Note, does not encode position information (absolute or realtive) self.temperature = 1.0 - self.att_dim = att_dim + self.att_dim = att_dim self.key_fun = nn.Linear(ip_dim, att_dim) self.val_fun = nn.Linear(ip_dim, att_dim) self.que_fun = nn.Linear(ip_dim, att_dim) self.pro_fun = nn.Linear(att_dim, ip_dim) def forward(self, x): - x = x.squeeze(2).permute(0,2,1) + x = x.squeeze(2).permute(0, 2, 1) - kk = torch.matmul(x, self.key_fun.weight.T) + self.key_fun.bias.unsqueeze(0).unsqueeze(0) - qq = torch.matmul(x, self.que_fun.weight.T) + self.que_fun.bias.unsqueeze(0).unsqueeze(0) - vv = torch.matmul(x, self.val_fun.weight.T) + self.val_fun.bias.unsqueeze(0).unsqueeze(0) + kk = torch.matmul( + x, self.key_fun.weight.T + ) + self.key_fun.bias.unsqueeze(0).unsqueeze(0) + qq = torch.matmul( + x, self.que_fun.weight.T + ) + self.que_fun.bias.unsqueeze(0).unsqueeze(0) + vv = torch.matmul( + x, self.val_fun.weight.T + ) + self.val_fun.bias.unsqueeze(0).unsqueeze(0) - kk_qq = torch.bmm(kk, qq.permute(0,2,1)) / (self.temperature*self.att_dim) - att_weights = F.softmax(kk_qq, 1) # each col of each attention matrix sums to 1 - att = torch.bmm(vv.permute(0,2,1), att_weights) + kk_qq = torch.bmm(kk, qq.permute(0, 2, 1)) / ( + self.temperature * self.att_dim + ) + att_weights = F.softmax( + kk_qq, 1 + ) # each col of each attention matrix sums to 1 + att = torch.bmm(vv.permute(0, 2, 1), att_weights) - op = torch.matmul(att.permute(0,2,1), self.pro_fun.weight.T) + self.pro_fun.bias.unsqueeze(0).unsqueeze(0) - op = op.permute(0,2,1).unsqueeze(2) + op = torch.matmul( + att.permute(0, 2, 1), self.pro_fun.weight.T + ) + self.pro_fun.bias.unsqueeze(0).unsqueeze(0) + op = op.permute(0, 2, 1).unsqueeze(2) return op class ConvBlockDownCoordF(nn.Module): - def __init__(self, in_chn, out_chn, 
ip_height, k_size=3, pad_size=1, stride=1): + def __init__( + self, in_chn, out_chn, ip_height, k_size=3, pad_size=1, stride=1 + ): super(ConvBlockDownCoordF, self).__init__() - self.coords = nn.Parameter(torch.linspace(-1, 1, ip_height)[None, None, ..., None], requires_grad=False) - self.conv = nn.Conv2d(in_chn+1, out_chn, kernel_size=k_size, padding=pad_size, stride=stride) + self.coords = nn.Parameter( + torch.linspace(-1, 1, ip_height)[None, None, ..., None], + requires_grad=False, + ) + self.conv = nn.Conv2d( + in_chn + 1, + out_chn, + kernel_size=k_size, + padding=pad_size, + stride=stride, + ) self.conv_bn = nn.BatchNorm2d(out_chn) def forward(self, x): - freq_info = self.coords.repeat(x.shape[0],1,1,x.shape[3]) + freq_info = self.coords.repeat(x.shape[0], 1, 1, x.shape[3]) x = torch.cat((x, freq_info), 1) x = F.max_pool2d(self.conv(x), 2, 2) x = F.relu(self.conv_bn(x), inplace=True) @@ -49,9 +78,17 @@ class ConvBlockDownCoordF(nn.Module): class ConvBlockDownStandard(nn.Module): - def __init__(self, in_chn, out_chn, ip_height=None, k_size=3, pad_size=1, stride=1): + def __init__( + self, in_chn, out_chn, ip_height=None, k_size=3, pad_size=1, stride=1 + ): super(ConvBlockDownStandard, self).__init__() - self.conv = nn.Conv2d(in_chn, out_chn, kernel_size=k_size, padding=pad_size, stride=stride) + self.conv = nn.Conv2d( + in_chn, + out_chn, + kernel_size=k_size, + padding=pad_size, + stride=stride, + ) self.conv_bn = nn.BatchNorm2d(out_chn) def forward(self, x): @@ -61,17 +98,41 @@ class ConvBlockDownStandard(nn.Module): class ConvBlockUpF(nn.Module): - def __init__(self, in_chn, out_chn, ip_height, k_size=3, pad_size=1, up_mode='bilinear', up_scale=(2,2)): + def __init__( + self, + in_chn, + out_chn, + ip_height, + k_size=3, + pad_size=1, + up_mode="bilinear", + up_scale=(2, 2), + ): super(ConvBlockUpF, self).__init__() self.up_scale = up_scale self.up_mode = up_mode - self.coords = nn.Parameter(torch.linspace(-1, 1, ip_height*up_scale[0])[None, None, ..., None], requires_grad=False) - self.conv = nn.Conv2d(in_chn+1, out_chn, kernel_size=k_size, padding=pad_size) + self.coords = nn.Parameter( + torch.linspace(-1, 1, ip_height * up_scale[0])[ + None, None, ..., None + ], + requires_grad=False, + ) + self.conv = nn.Conv2d( + in_chn + 1, out_chn, kernel_size=k_size, padding=pad_size + ) self.conv_bn = nn.BatchNorm2d(out_chn) def forward(self, x): - op = F.interpolate(x, size=(x.shape[-2]*self.up_scale[0], x.shape[-1]*self.up_scale[1]), mode=self.up_mode, align_corners=False) - freq_info = self.coords.repeat(op.shape[0],1,1,op.shape[3]) + op = F.interpolate( + x, + size=( + x.shape[-2] * self.up_scale[0], + x.shape[-1] * self.up_scale[1], + ), + mode=self.up_mode, + align_corners=False, + ) + freq_info = self.coords.repeat(op.shape[0], 1, 1, op.shape[3]) op = torch.cat((op, freq_info), 1) op = self.conv(op) op = F.relu(self.conv_bn(op), inplace=True) @@ -79,15 +140,34 @@ class ConvBlockUpF(nn.Module): class ConvBlockUpStandard(nn.Module): - def __init__(self, in_chn, out_chn, ip_height=None, k_size=3, pad_size=1, up_mode='bilinear', up_scale=(2,2)): + def __init__( + self, + in_chn, + out_chn, + ip_height=None, + k_size=3, + pad_size=1, + up_mode="bilinear", + up_scale=(2, 2), + ): super(ConvBlockUpStandard, self).__init__() self.up_scale = up_scale self.up_mode = up_mode - self.conv = nn.Conv2d(in_chn, out_chn, kernel_size=k_size, padding=pad_size) + self.conv = nn.Conv2d( + in_chn, out_chn, kernel_size=k_size, padding=pad_size + ) self.conv_bn = nn.BatchNorm2d(out_chn) def forward(self, 
x): - op = F.interpolate(x, size=(x.shape[-2]*self.up_scale[0], x.shape[-1]*self.up_scale[1]), mode=self.up_mode, align_corners=False) + op = F.interpolate( + x, + size=( + x.shape[-2] * self.up_scale[0], + x.shape[-1] * self.up_scale[1], + ), + mode=self.up_mode, + align_corners=False, + ) op = self.conv(op) op = F.relu(self.conv_bn(op), inplace=True) return op diff --git a/bat_detect/detector/models.py b/bat_detect/detector/models.py index fc7b5b4..99a48e1 100644 --- a/bat_detect/detector/models.py +++ b/bat_detect/detector/models.py @@ -1,54 +1,109 @@ -import torch.nn as nn import torch -import torch.nn.functional as F -import numpy as np -from .model_helpers import * - -import torchvision - import torch.fft +import torch.nn.functional as F from torch import nn +from bat_detect.detector.model_helpers import ( + ConvBlockDownCoordF, + ConvBlockDownStandard, + ConvBlockUpF, + ConvBlockUpStandard, + SelfAttention, +) +from bat_detect.types import ModelOutput + +__all__ = [ + "Net2DFast", + "Net2DFastNoAttn", + "Net2DFastNoCoordConv", +] + class Net2DFast(nn.Module): - def __init__(self, num_filts, num_classes=0, emb_dim=0, ip_height=128, resize_factor=0.5): - super(Net2DFast, self).__init__() + def __init__( + self, + num_filts, + num_classes=0, + emb_dim=0, + ip_height=128, + resize_factor=0.5, + ): + super().__init__() self.num_classes = num_classes self.emb_dim = emb_dim self.num_filts = num_filts self.resize_factor = resize_factor self.ip_height_rs = ip_height - self.bneck_height = self.ip_height_rs//32 + self.bneck_height = self.ip_height_rs // 32 # encoder - self.conv_dn_0 = ConvBlockDownCoordF(1, num_filts//4, self.ip_height_rs, k_size=3, pad_size=1, stride=1) - self.conv_dn_1 = ConvBlockDownCoordF(num_filts//4, num_filts//2, self.ip_height_rs//2, k_size=3, pad_size=1, stride=1) - self.conv_dn_2 = ConvBlockDownCoordF(num_filts//2, num_filts, self.ip_height_rs//4, k_size=3, pad_size=1, stride=1) - self.conv_dn_3 = nn.Conv2d(num_filts, num_filts*2, 3, padding=1) - self.conv_dn_3_bn = nn.BatchNorm2d(num_filts*2) + self.conv_dn_0 = ConvBlockDownCoordF( + 1, + num_filts // 4, + self.ip_height_rs, + k_size=3, + pad_size=1, + stride=1, + ) + self.conv_dn_1 = ConvBlockDownCoordF( + num_filts // 4, + num_filts // 2, + self.ip_height_rs // 2, + k_size=3, + pad_size=1, + stride=1, + ) + self.conv_dn_2 = ConvBlockDownCoordF( + num_filts // 2, + num_filts, + self.ip_height_rs // 4, + k_size=3, + pad_size=1, + stride=1, + ) + self.conv_dn_3 = nn.Conv2d(num_filts, num_filts * 2, 3, padding=1) + self.conv_dn_3_bn = nn.BatchNorm2d(num_filts * 2) # bottleneck - self.conv_1d = nn.Conv2d(num_filts*2, num_filts*2, (self.ip_height_rs//8,1), padding=0) - self.conv_1d_bn = nn.BatchNorm2d(num_filts*2) - self.att = SelfAttention(num_filts*2, num_filts*2) + self.conv_1d = nn.Conv2d( + num_filts * 2, + num_filts * 2, + (self.ip_height_rs // 8, 1), + padding=0, + ) + self.conv_1d_bn = nn.BatchNorm2d(num_filts * 2) + self.att = SelfAttention(num_filts * 2, num_filts * 2) # decoder - self.conv_up_2 = ConvBlockUpF(num_filts*2, num_filts//2, self.ip_height_rs//8) - self.conv_up_3 = ConvBlockUpF(num_filts//2, num_filts//4, self.ip_height_rs//4) - self.conv_up_4 = ConvBlockUpF(num_filts//4, num_filts//4, self.ip_height_rs//2) + self.conv_up_2 = ConvBlockUpF( + num_filts * 2, num_filts // 2, self.ip_height_rs // 8 + ) + self.conv_up_3 = ConvBlockUpF( + num_filts // 2, num_filts // 4, self.ip_height_rs // 4 + ) + self.conv_up_4 = ConvBlockUpF( + num_filts // 4, num_filts // 4, self.ip_height_rs // 2 + ) # output # +1 
to include background class for class output - self.conv_op = nn.Conv2d(num_filts//4, num_filts//4, kernel_size=3, padding=1) - self.conv_op_bn = nn.BatchNorm2d(num_filts//4) - self.conv_size_op = nn.Conv2d(num_filts//4, 2, kernel_size=1, padding=0) - self.conv_classes_op = nn.Conv2d(num_filts//4, self.num_classes+1, kernel_size=1, padding=0) + self.conv_op = nn.Conv2d( + num_filts // 4, num_filts // 4, kernel_size=3, padding=1 + ) + self.conv_op_bn = nn.BatchNorm2d(num_filts // 4) + self.conv_size_op = nn.Conv2d( + num_filts // 4, 2, kernel_size=1, padding=0 + ) + self.conv_classes_op = nn.Conv2d( + num_filts // 4, self.num_classes + 1, kernel_size=1, padding=0 + ) if self.emb_dim > 0: - self.conv_emb = nn.Conv2d(num_filts, self.emb_dim, kernel_size=1, padding=0) + self.conv_emb = nn.Conv2d( + num_filts, self.emb_dim, kernel_size=1, padding=0 + ) - - def forward(self, ip, return_feats=False): + def forward(self, ip, return_feats=False) -> ModelOutput: # encoder x1 = self.conv_dn_0(ip) @@ -59,134 +114,218 @@ class Net2DFast(nn.Module): # bottleneck x = F.relu(self.conv_1d_bn(self.conv_1d(x3)), inplace=True) x = self.att(x) - x = x.repeat([1,1,self.bneck_height*4,1]) + x = x.repeat([1, 1, self.bneck_height * 4, 1]) # decoder - x = self.conv_up_2(x+x3) - x = self.conv_up_3(x+x2) - x = self.conv_up_4(x+x1) + x = self.conv_up_2(x + x3) + x = self.conv_up_3(x + x2) + x = self.conv_up_4(x + x1) # output x = F.relu(self.conv_op_bn(self.conv_op(x)), inplace=True) - cls = self.conv_classes_op(x) + cls = self.conv_classes_op(x) comb = torch.softmax(cls, 1) - op = {} - op['pred_det'] = comb[:,:-1, :, :].sum(1).unsqueeze(1) - op['pred_size'] = F.relu(self.conv_size_op(x), inplace=True) - op['pred_class'] = comb - op['pred_class_un_norm'] = cls - if self.emb_dim > 0: - op['pred_emb'] = self.conv_emb(x) - if return_feats: - op['features'] = x - - return op + return ModelOutput( + pred_det=comb[:, :-1, :, :].sum(1).unsqueeze(1), + pred_size=F.relu(self.conv_size_op(x), inplace=True), + pred_class=comb, + pred_class_un_norm=cls, + features=x, + ) class Net2DFastNoAttn(nn.Module): - def __init__(self, num_filts, num_classes=0, emb_dim=0, ip_height=128, resize_factor=0.5): - super(Net2DFastNoAttn, self).__init__() + def __init__( + self, + num_filts, + num_classes=0, + emb_dim=0, + ip_height=128, + resize_factor=0.5, + ): + super().__init__() self.num_classes = num_classes self.emb_dim = emb_dim self.num_filts = num_filts self.resize_factor = resize_factor self.ip_height_rs = ip_height - self.bneck_height = self.ip_height_rs//32 + self.bneck_height = self.ip_height_rs // 32 - self.conv_dn_0 = ConvBlockDownCoordF(1, num_filts//4, self.ip_height_rs, k_size=3, pad_size=1, stride=1) - self.conv_dn_1 = ConvBlockDownCoordF(num_filts//4, num_filts//2, self.ip_height_rs//2, k_size=3, pad_size=1, stride=1) - self.conv_dn_2 = ConvBlockDownCoordF(num_filts//2, num_filts, self.ip_height_rs//4, k_size=3, pad_size=1, stride=1) - self.conv_dn_3 = nn.Conv2d(num_filts, num_filts*2, 3, padding=1) - self.conv_dn_3_bn = nn.BatchNorm2d(num_filts*2) + self.conv_dn_0 = ConvBlockDownCoordF( + 1, + num_filts // 4, + self.ip_height_rs, + k_size=3, + pad_size=1, + stride=1, + ) + self.conv_dn_1 = ConvBlockDownCoordF( + num_filts // 4, + num_filts // 2, + self.ip_height_rs // 2, + k_size=3, + pad_size=1, + stride=1, + ) + self.conv_dn_2 = ConvBlockDownCoordF( + num_filts // 2, + num_filts, + self.ip_height_rs // 4, + k_size=3, + pad_size=1, + stride=1, + ) + self.conv_dn_3 = nn.Conv2d(num_filts, num_filts * 2, 3, padding=1) + 
self.conv_dn_3_bn = nn.BatchNorm2d(num_filts * 2) - self.conv_1d = nn.Conv2d(num_filts*2, num_filts*2, (self.ip_height_rs//8,1), padding=0) - self.conv_1d_bn = nn.BatchNorm2d(num_filts*2) + self.conv_1d = nn.Conv2d( + num_filts * 2, + num_filts * 2, + (self.ip_height_rs // 8, 1), + padding=0, + ) + self.conv_1d_bn = nn.BatchNorm2d(num_filts * 2) - - self.conv_up_2 = ConvBlockUpF(num_filts*2, num_filts//2, self.ip_height_rs//8) - self.conv_up_3 = ConvBlockUpF(num_filts//2, num_filts//4, self.ip_height_rs//4) - self.conv_up_4 = ConvBlockUpF(num_filts//4, num_filts//4, self.ip_height_rs//2) + self.conv_up_2 = ConvBlockUpF( + num_filts * 2, num_filts // 2, self.ip_height_rs // 8 + ) + self.conv_up_3 = ConvBlockUpF( + num_filts // 2, num_filts // 4, self.ip_height_rs // 4 + ) + self.conv_up_4 = ConvBlockUpF( + num_filts // 4, num_filts // 4, self.ip_height_rs // 2 + ) # output # +1 to include background class for class output - self.conv_op = nn.Conv2d(num_filts//4, num_filts//4, kernel_size=3, padding=1) - self.conv_op_bn = nn.BatchNorm2d(num_filts//4) - self.conv_size_op = nn.Conv2d(num_filts//4, 2, kernel_size=1, padding=0) - self.conv_classes_op = nn.Conv2d(num_filts//4, self.num_classes+1, kernel_size=1, padding=0) + self.conv_op = nn.Conv2d( + num_filts // 4, num_filts // 4, kernel_size=3, padding=1 + ) + self.conv_op_bn = nn.BatchNorm2d(num_filts // 4) + self.conv_size_op = nn.Conv2d( + num_filts // 4, 2, kernel_size=1, padding=0 + ) + self.conv_classes_op = nn.Conv2d( + num_filts // 4, self.num_classes + 1, kernel_size=1, padding=0 + ) if self.emb_dim > 0: - self.conv_emb = nn.Conv2d(num_filts, self.emb_dim, kernel_size=1, padding=0) - - def forward(self, ip, return_feats=False): + self.conv_emb = nn.Conv2d( + num_filts, self.emb_dim, kernel_size=1, padding=0 + ) + def forward(self, ip, return_feats=False) -> ModelOutput: x1 = self.conv_dn_0(ip) x2 = self.conv_dn_1(x1) x3 = self.conv_dn_2(x2) x3 = F.relu(self.conv_dn_3_bn(self.conv_dn_3(x3)), inplace=True) x = F.relu(self.conv_1d_bn(self.conv_1d(x3)), inplace=True) - x = x.repeat([1,1,self.bneck_height*4,1]) + x = x.repeat([1, 1, self.bneck_height * 4, 1]) - x = self.conv_up_2(x+x3) - x = self.conv_up_3(x+x2) - x = self.conv_up_4(x+x1) + x = self.conv_up_2(x + x3) + x = self.conv_up_3(x + x2) + x = self.conv_up_4(x + x1) x = F.relu(self.conv_op_bn(self.conv_op(x)), inplace=True) - cls = self.conv_classes_op(x) + cls = self.conv_classes_op(x) comb = torch.softmax(cls, 1) - op = {} - op['pred_det'] = comb[:,:-1, :, :].sum(1).unsqueeze(1) - op['pred_size'] = F.relu(self.conv_size_op(x), inplace=True) - op['pred_class'] = comb - op['pred_class_un_norm'] = cls - if self.emb_dim > 0: - op['pred_emb'] = self.conv_emb(x) - if return_feats: - op['features'] = x - - return op + return ModelOutput( + pred_det=comb[:, :-1, :, :].sum(1).unsqueeze(1), + pred_size=F.relu(self.conv_size_op(x), inplace=True), + pred_class=comb, + pred_class_un_norm=cls, + features=x, + ) class Net2DFastNoCoordConv(nn.Module): - def __init__(self, num_filts, num_classes=0, emb_dim=0, ip_height=128, resize_factor=0.5): - super(Net2DFastNoCoordConv, self).__init__() + def __init__( + self, + num_filts, + num_classes=0, + emb_dim=0, + ip_height=128, + resize_factor=0.5, + ): + super().__init__() self.num_classes = num_classes self.emb_dim = emb_dim self.num_filts = num_filts self.resize_factor = resize_factor self.ip_height_rs = ip_height - self.bneck_height = self.ip_height_rs//32 + self.bneck_height = self.ip_height_rs // 32 - self.conv_dn_0 = ConvBlockDownStandard(1, 
num_filts//4, self.ip_height_rs, k_size=3, pad_size=1, stride=1) - self.conv_dn_1 = ConvBlockDownStandard(num_filts//4, num_filts//2, self.ip_height_rs//2, k_size=3, pad_size=1, stride=1) - self.conv_dn_2 = ConvBlockDownStandard(num_filts//2, num_filts, self.ip_height_rs//4, k_size=3, pad_size=1, stride=1) - self.conv_dn_3 = nn.Conv2d(num_filts, num_filts*2, 3, padding=1) - self.conv_dn_3_bn = nn.BatchNorm2d(num_filts*2) + self.conv_dn_0 = ConvBlockDownStandard( + 1, + num_filts // 4, + self.ip_height_rs, + k_size=3, + pad_size=1, + stride=1, + ) + self.conv_dn_1 = ConvBlockDownStandard( + num_filts // 4, + num_filts // 2, + self.ip_height_rs // 2, + k_size=3, + pad_size=1, + stride=1, + ) + self.conv_dn_2 = ConvBlockDownStandard( + num_filts // 2, + num_filts, + self.ip_height_rs // 4, + k_size=3, + pad_size=1, + stride=1, + ) + self.conv_dn_3 = nn.Conv2d(num_filts, num_filts * 2, 3, padding=1) + self.conv_dn_3_bn = nn.BatchNorm2d(num_filts * 2) - self.conv_1d = nn.Conv2d(num_filts*2, num_filts*2, (self.ip_height_rs//8,1), padding=0) - self.conv_1d_bn = nn.BatchNorm2d(num_filts*2) + self.conv_1d = nn.Conv2d( + num_filts * 2, + num_filts * 2, + (self.ip_height_rs // 8, 1), + padding=0, + ) + self.conv_1d_bn = nn.BatchNorm2d(num_filts * 2) - self.att = SelfAttention(num_filts*2, num_filts*2) + self.att = SelfAttention(num_filts * 2, num_filts * 2) - self.conv_up_2 = ConvBlockUpStandard(num_filts*2, num_filts//2, self.ip_height_rs//8) - self.conv_up_3 = ConvBlockUpStandard(num_filts//2, num_filts//4, self.ip_height_rs//4) - self.conv_up_4 = ConvBlockUpStandard(num_filts//4, num_filts//4, self.ip_height_rs//2) + self.conv_up_2 = ConvBlockUpStandard( + num_filts * 2, num_filts // 2, self.ip_height_rs // 8 + ) + self.conv_up_3 = ConvBlockUpStandard( + num_filts // 2, num_filts // 4, self.ip_height_rs // 4 + ) + self.conv_up_4 = ConvBlockUpStandard( + num_filts // 4, num_filts // 4, self.ip_height_rs // 2 + ) # output # +1 to include background class for class output - self.conv_op = nn.Conv2d(num_filts//4, num_filts//4, kernel_size=3, padding=1) - self.conv_op_bn = nn.BatchNorm2d(num_filts//4) - self.conv_size_op = nn.Conv2d(num_filts//4, 2, kernel_size=1, padding=0) - self.conv_classes_op = nn.Conv2d(num_filts//4, self.num_classes+1, kernel_size=1, padding=0) + self.conv_op = nn.Conv2d( + num_filts // 4, num_filts // 4, kernel_size=3, padding=1 + ) + self.conv_op_bn = nn.BatchNorm2d(num_filts // 4) + self.conv_size_op = nn.Conv2d( + num_filts // 4, 2, kernel_size=1, padding=0 + ) + self.conv_classes_op = nn.Conv2d( + num_filts // 4, self.num_classes + 1, kernel_size=1, padding=0 + ) if self.emb_dim > 0: - self.conv_emb = nn.Conv2d(num_filts, self.emb_dim, kernel_size=1, padding=0) + self.conv_emb = nn.Conv2d( + num_filts, self.emb_dim, kernel_size=1, padding=0 + ) - def forward(self, ip, return_feats=False): + def forward(self, ip, return_feats=False) -> ModelOutput: x1 = self.conv_dn_0(ip) x2 = self.conv_dn_1(x1) @@ -195,24 +334,21 @@ class Net2DFastNoCoordConv(nn.Module): x = F.relu(self.conv_1d_bn(self.conv_1d(x3)), inplace=True) x = self.att(x) - x = x.repeat([1,1,self.bneck_height*4,1]) + x = x.repeat([1, 1, self.bneck_height * 4, 1]) - x = self.conv_up_2(x+x3) - x = self.conv_up_3(x+x2) - x = self.conv_up_4(x+x1) + x = self.conv_up_2(x + x3) + x = self.conv_up_3(x + x2) + x = self.conv_up_4(x + x1) x = F.relu(self.conv_op_bn(self.conv_op(x)), inplace=True) - cls = self.conv_classes_op(x) + cls = self.conv_classes_op(x) comb = torch.softmax(cls, 1) - op = {} - op['pred_det'] = comb[:,:-1, 
diff --git a/bat_detect/detector/parameters.py b/bat_detect/detector/parameters.py index 10276eb..f733062 100644 --- a/bat_detect/detector/parameters.py +++ b/bat_detect/detector/parameters.py @@ -1,108 +1,235 @@ -import numpy as np -import os import datetime +import os + +from bat_detect.types import ( + ProcessingConfiguration, + SpectrogramParameters, +) + +TARGET_SAMPLERATE_HZ = 256000 +FFT_WIN_LENGTH_S = 512 / 256000.0 +FFT_OVERLAP = 0.75 +MAX_FREQ_HZ = 120000 +MIN_FREQ_HZ = 10000 +RESIZE_FACTOR = 0.5 +SPEC_DIVIDE_FACTOR = 32 +SPEC_HEIGHT = 256 +SCALE_RAW_AUDIO = False +DETECTION_THRESHOLD = 0.01 +NMS_KERNEL_SIZE = 9 +NMS_TOP_K_PER_SEC = 200 +SPEC_SCALE = "pcen" +DENOISE_SPEC_AVG = True +MAX_SCALE_SPEC = False + + +DEFAULT_MODEL_PATH = os.path.join( + os.path.dirname(os.path.dirname(__file__)), + "models", + "Net2DFast_UK_same.pth.tar", +) + + +DEFAULT_SPECTROGRAM_PARAMETERS: SpectrogramParameters = { + "fft_win_length": FFT_WIN_LENGTH_S, + "fft_overlap": FFT_OVERLAP, + "spec_height": SPEC_HEIGHT, + "resize_factor": RESIZE_FACTOR, + "spec_divide_factor": SPEC_DIVIDE_FACTOR, + "max_freq": MAX_FREQ_HZ, + "min_freq": MIN_FREQ_HZ, + "spec_scale": SPEC_SCALE, + "denoise_spec_avg": DENOISE_SPEC_AVG, + "max_scale_spec": MAX_SCALE_SPEC, +} + + +DEFAULT_PROCESSING_CONFIGURATIONS: ProcessingConfiguration = { + "detection_threshold": DETECTION_THRESHOLD, + "spec_slices": False, + "chunk_size": 3, + "spec_features": False, + "cnn_features": False, + "quiet": True, + "target_samp_rate": TARGET_SAMPLERATE_HZ, + "fft_win_length": FFT_WIN_LENGTH_S, + "fft_overlap": FFT_OVERLAP, + "resize_factor": RESIZE_FACTOR, + "spec_divide_factor": SPEC_DIVIDE_FACTOR, + "spec_height": SPEC_HEIGHT, + "scale_raw_audio": SCALE_RAW_AUDIO, + "class_names": [], + "time_expansion": 1, + "top_n": 3, + "return_raw_preds": False, + "max_duration": None, + "nms_kernel_size": NMS_KERNEL_SIZE, + "max_freq": MAX_FREQ_HZ, + "min_freq": MIN_FREQ_HZ, + "nms_top_k_per_sec": NMS_TOP_K_PER_SEC, + "spec_scale": SPEC_SCALE, + "denoise_spec_avg": DENOISE_SPEC_AVG, + "max_scale_spec": MAX_SCALE_SPEC, +} def mk_dir(path): if not os.path.isdir(path): os.makedirs(path) - - -def get_params(make_dirs=False, exps_dir='../../experiments/'): + + +def get_params(make_dirs=False, exps_dir="../../experiments/"): params = {} - params['model_name'] = 'Net2DFast' # Net2DFast, Net2DSkip, Net2DSimple, Net2DSkipDS, Net2DRN - params['num_filters'] = 128 + params[ + "model_name" + ] = "Net2DFast" # Net2DFast, Net2DSkip, Net2DSimple, Net2DSkipDS, Net2DRN + params["num_filters"] = 128 now_str = datetime.datetime.now().strftime("%Y_%m_%d__%H_%M_%S") - model_name = now_str + '.pth.tar' - params['experiment'] = os.path.join(exps_dir, now_str, '') - params['model_file_name'] = os.path.join(params['experiment'], model_name) - params['op_im_dir'] = os.path.join(params['experiment'], 'op_ims', '') - params['op_im_dir_test'] = os.path.join(params['experiment'], 'op_ims_test', '') - #params['notes'] = '' # can save notes about an experiment here - + model_name = 
now_str + ".pth.tar" + params["experiment"] = os.path.join(exps_dir, now_str, "") + params["model_file_name"] = os.path.join(params["experiment"], model_name) + params["op_im_dir"] = os.path.join(params["experiment"], "op_ims", "") + params["op_im_dir_test"] = os.path.join( + params["experiment"], "op_ims_test", "" + ) + # params['notes'] = '' # can save notes about an experiment here # spec parameters - params['target_samp_rate'] = 256000 # resamples all audio so that it is at this rate - params['fft_win_length'] = 512 / 256000.0 # in milliseconds, amount of time per stft time step - params['fft_overlap'] = 0.75 # stft window overlap + params[ + "target_samp_rate" + ] = TARGET_SAMPLERATE_HZ # resamples all audio so that it is at this rate + params[ + "fft_win_length" + ] = FFT_WIN_LENGTH_S # in milliseconds, amount of time per stft time step + params["fft_overlap"] = FFT_OVERLAP # stft window overlap - params['max_freq'] = 120000 # in Hz, everything above this will be discarded - params['min_freq'] = 10000 # in Hz, everything below this will be discarded + params[ + "max_freq" + ] = MAX_FREQ_HZ # in Hz, everything above this will be discarded + params[ + "min_freq" + ] = MIN_FREQ_HZ # in Hz, everything below this will be discarded - params['resize_factor'] = 0.5 # resize so the spectrogram at the input of the network - params['spec_height'] = 256 # units are number of frequency bins (before resizing is performed) - params['spec_train_width'] = 512 # units are number of time steps (before resizing is performed) - params['spec_divide_factor'] = 32 # spectrogram should be divisible by this amount in width and height + params[ + "resize_factor" + ] = RESIZE_FACTOR # resize so the spectrogram at the input of the network + params[ + "spec_height" + ] = SPEC_HEIGHT # units are number of frequency bins (before resizing is performed) + params[ + "spec_train_width" + ] = 512 # units are number of time steps (before resizing is performed) + params[ + "spec_divide_factor" + ] = SPEC_DIVIDE_FACTOR # spectrogram should be divisible by this amount in width and height # spec processing params - params['denoise_spec_avg'] = True # removes the mean for each frequency band - params['scale_raw_audio'] = False # scales the raw audio to [-1, 1] - params['max_scale_spec'] = False # scales the spectrogram so that it is max 1 - params['spec_scale'] = 'pcen' # 'log', 'pcen', 'none' + params[ + "denoise_spec_avg" + ] = DENOISE_SPEC_AVG # removes the mean for each frequency band + params[ + "scale_raw_audio" + ] = SCALE_RAW_AUDIO # scales the raw audio to [-1, 1] + params[ + "max_scale_spec" + ] = MAX_SCALE_SPEC # scales the spectrogram so that it is max 1 + params["spec_scale"] = SPEC_SCALE # 'log', 'pcen', 'none' # detection params - params['detection_overlap'] = 0.01 # has to be within this number of ms to count as detection - params['ignore_start_end'] = 0.01 # if start of GT calls are within this time from the start/end of file ignore - params['detection_threshold'] = 0.01 # the smaller this is the better the recall will be - params['nms_kernel_size'] = 9 - params['nms_top_k_per_sec'] = 200 # keep top K highest predictions per second of audio - params['target_sigma'] = 2.0 + params[ + "detection_overlap" + ] = 0.01 # has to be within this number of ms to count as detection + params[ + "ignore_start_end" + ] = 0.01 # if start of GT calls are within this time from the start/end of file ignore + params[ + "detection_threshold" + ] = DETECTION_THRESHOLD # the smaller this is the better the recall will be + params[ 
+ "nms_kernel_size" + ] = NMS_KERNEL_SIZE # size of the kernel for non-max suppression + params[ + "nms_top_k_per_sec" + ] = NMS_TOP_K_PER_SEC # keep top K highest predictions per second of audio + params["target_sigma"] = 2.0 # augmentation params - params['aug_prob'] = 0.20 # augmentations will be performed with this probability - params['augment_at_train'] = True - params['augment_at_train_combine'] = True - params['echo_max_delay'] = 0.005 # simulate echo by adding copy of raw audio - params['stretch_squeeze_delta'] = 0.04 # stretch or squeeze spec - params['mask_max_time_perc'] = 0.05 # max mask size - here percentage, not ideal - params['mask_max_freq_perc'] = 0.10 # max mask size - here percentage, not ideal - params['spec_amp_scaling'] = 2.0 # multiply the "volume" by 0:X times current amount - params['aug_sampling_rates'] = [220500, 256000, 300000, 312500, 384000, 441000, 500000] + params[ + "aug_prob" + ] = 0.20 # augmentations will be performed with this probability + params["augment_at_train"] = True + params["augment_at_train_combine"] = True + params[ + "echo_max_delay" + ] = 0.005 # simulate echo by adding copy of raw audio + params["stretch_squeeze_delta"] = 0.04 # stretch or squeeze spec + params[ + "mask_max_time_perc" + ] = 0.05 # max mask size - here percentage, not ideal + params[ + "mask_max_freq_perc" + ] = 0.10 # max mask size - here percentage, not ideal + params[ + "spec_amp_scaling" + ] = 2.0 # multiply the "volume" by 0:X times current amount + params["aug_sampling_rates"] = [ + 220500, + 256000, + 300000, + 312500, + 384000, + 441000, + 500000, + ] # loss params - params['train_loss'] = 'focal' # mse or focal - params['det_loss_weight'] = 1.0 # weight for the detection part of the loss - params['size_loss_weight'] = 0.1 # weight for the bbox size loss - params['class_loss_weight'] = 2.0 # weight for the classification loss - params['individual_loss_weight'] = 0.0 # not used - if params['individual_loss_weight'] == 0.0: - params['emb_dim'] = 0 # number of dimensions used for individual id embedding + params["train_loss"] = "focal" # mse or focal + params["det_loss_weight"] = 1.0 # weight for the detection part of the loss + params["size_loss_weight"] = 0.1 # weight for the bbox size loss + params["class_loss_weight"] = 2.0 # weight for the classification loss + params["individual_loss_weight"] = 0.0 # not used + if params["individual_loss_weight"] == 0.0: + params[ + "emb_dim" + ] = 0 # number of dimensions used for individual id embedding else: - params['emb_dim'] = 3 + params["emb_dim"] = 3 # train params - params['lr'] = 0.001 - params['batch_size'] = 8 - params['num_workers'] = 4 - params['num_epochs'] = 200 - params['num_eval_epochs'] = 5 # run evaluation every X epochs - params['device'] = 'cuda' - params['save_test_image_during_train'] = False - params['save_test_image_after_train'] = True + params["lr"] = 0.001 + params["batch_size"] = 8 + params["num_workers"] = 4 + params["num_epochs"] = 200 + params["num_eval_epochs"] = 5 # run evaluation every X epochs + params["device"] = "cuda" + params["save_test_image_during_train"] = False + params["save_test_image_after_train"] = True - params['convert_to_genus'] = False - params['genus_mapping'] = [] - params['class_names'] = [] - params['classes_to_ignore'] = ['', ' ', 'Unknown', 'Not Bat'] - params['generic_class'] = ['Bat'] - params['events_of_interest'] = ['Echolocation'] # will ignore all other types of events e.g. 
social calls + params["convert_to_genus"] = False + params["genus_mapping"] = [] + params["class_names"] = [] + params["classes_to_ignore"] = ["", " ", "Unknown", "Not Bat"] + params["generic_class"] = ["Bat"] + params["events_of_interest"] = [ + "Echolocation" + ] # will ignore all other types of events e.g. social calls # the classes in this list are standardized during training so that the same low and high freq are used - params['standardize_classs_names'] = [] + params["standardize_classs_names"] = [] # create directories if make_dirs: - print('Model name : ' + params['model_name']) - print('Model file : ' + params['model_file_name']) - print('Experiment : ' + params['experiment']) + print("Model name : " + params["model_name"]) + print("Model file : " + params["model_file_name"]) + print("Experiment : " + params["experiment"]) - mk_dir(params['experiment']) - if params['save_test_image_during_train']: - mk_dir(params['op_im_dir']) - if params['save_test_image_after_train']: - mk_dir(params['op_im_dir_test']) - mk_dir(os.path.dirname(params['model_file_name'])) + mk_dir(params["experiment"]) + if params["save_test_image_during_train"]: + mk_dir(params["op_im_dir"]) + if params["save_test_image_after_train"]: + mk_dir(params["op_im_dir_test"]) + mk_dir(os.path.dirname(params["model_file_name"])) return params diff --git a/bat_detect/detector/post_process.py b/bat_detect/detector/post_process.py index 757831f..5aa6895 100644 --- a/bat_detect/detector/post_process.py +++ b/bat_detect/detector/post_process.py @@ -1,88 +1,168 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F +"""Post-processing of the output of the model.""" +from typing import List, Tuple, Union + import numpy as np -np.seterr(divide='ignore', invalid='ignore') +import torch +from torch import nn + +from bat_detect.detector.models import ModelOutput +from bat_detect.types import NonMaximumSuppressionConfig, PredictionResults + +np.seterr(divide="ignore", invalid="ignore") -def x_coords_to_time(x_pos, sampling_rate, fft_win_length, fft_overlap): - nfft = int(fft_win_length*sampling_rate) - noverlap = int(fft_overlap*nfft) - return ((x_pos*(nfft - noverlap)) + noverlap) / sampling_rate - #return (1.0 - fft_overlap) * fft_win_length * (x_pos + 0.5) # 0.5 is for center of temporal window +def x_coords_to_time( + x_pos: float, + sampling_rate: int, + fft_win_length: float, + fft_overlap: float, +) -> float: + """Convert x coordinates of spectrogram to time in seconds. + + Args: + x_pos: X position of the detection in pixels. + sampling_rate: Sampling rate of the audio in Hz. + fft_win_length: Length of the FFT window in seconds. + fft_overlap: Overlap of the FFT windows in seconds. + + Returns: + Time in seconds. + """ + nfft = int(fft_win_length * sampling_rate) + noverlap = int(fft_overlap * nfft) + return ((x_pos * (nfft - noverlap)) + noverlap) / sampling_rate def overall_class_pred(det_prob, class_prob): - weighted_pred = (class_prob*det_prob).sum(1) + weighted_pred = (class_prob * det_prob).sum(1) return weighted_pred / weighted_pred.sum() -def run_nms(outputs, params, sampling_rate): +def run_nms( + outputs: ModelOutput, + params: NonMaximumSuppressionConfig, + sampling_rate: np.ndarray, +) -> Tuple[List[PredictionResults], List[np.ndarray]]: + """Run non-maximum suppression on the output of the model. - pred_det = outputs['pred_det'] # probability of box - pred_size = outputs['pred_size'] # box size + Model outputs processed are expected to have a batch dimension. 
+ Each element of the batch is processed independently. The + result is a pair of lists, one for the predictions and one for + the features. Each element of the lists corresponds to one + element of the batch. + """ + pred_det, pred_size, pred_class, *_, features = outputs - pred_det_nms = non_max_suppression(pred_det, params['nms_kernel_size']) - freq_rescale = (params['max_freq'] - params['min_freq']) /pred_det.shape[-2] + pred_det_nms = non_max_suppression(pred_det, params["nms_kernel_size"]) + freq_rescale = (params["max_freq"] - params["min_freq"]) / pred_det.shape[ + -2 + ] - # NOTE there will be small differences depending on which sampling rate is chosen - # as we are choosing the same sampling rate for the entire batch - duration = x_coords_to_time(pred_det.shape[-1], sampling_rate[0].item(), - params['fft_win_length'], params['fft_overlap']) - top_k = int(duration * params['nms_top_k_per_sec']) + # NOTE: there will be small differences depending on which sampling rate + # is chosen as we are choosing the same sampling rate for the entire batch + duration = x_coords_to_time( + pred_det.shape[-1], + int(sampling_rate[0].item()), + params["fft_win_length"], + params["fft_overlap"], + ) + top_k = int(duration * params["nms_top_k_per_sec"]) scores, y_pos, x_pos = get_topk_scores(pred_det_nms, top_k) # loop over batch to save outputs - preds = [] - feats = [] - for ii in range(pred_det_nms.shape[0]): + preds: List[PredictionResults] = [] + feats: List[np.ndarray] = [] + for num_detection in range(pred_det_nms.shape[0]): # get valid indices - inds_ord = torch.argsort(x_pos[ii, :]) - valid_inds = scores[ii, inds_ord] > params['detection_threshold'] + inds_ord = torch.argsort(x_pos[num_detection, :]) + valid_inds = ( + scores[num_detection, inds_ord] > params["detection_threshold"] + ) valid_inds = inds_ord[valid_inds] # create result dictionary pred = {} - pred['det_probs'] = scores[ii, valid_inds] - pred['x_pos'] = x_pos[ii, valid_inds] - pred['y_pos'] = y_pos[ii, valid_inds] - pred['bb_width'] = pred_size[ii, 0, pred['y_pos'], pred['x_pos']] - pred['bb_height'] = pred_size[ii, 1, pred['y_pos'], pred['x_pos']] - pred['start_times'] = x_coords_to_time(pred['x_pos'].float() / params['resize_factor'], - sampling_rate[ii].item(), params['fft_win_length'], params['fft_overlap']) - pred['end_times'] = x_coords_to_time((pred['x_pos'].float()+pred['bb_width']) / params['resize_factor'], - sampling_rate[ii].item(), params['fft_win_length'], params['fft_overlap']) - pred['low_freqs'] = (pred_size[ii].shape[1] - pred['y_pos'].float())*freq_rescale + params['min_freq'] - pred['high_freqs'] = pred['low_freqs'] + pred['bb_height']*freq_rescale + pred["det_probs"] = scores[num_detection, valid_inds] + pred["x_pos"] = x_pos[num_detection, valid_inds] + pred["y_pos"] = y_pos[num_detection, valid_inds] + pred["bb_width"] = pred_size[ + num_detection, + 0, + pred["y_pos"], + pred["x_pos"], + ] + pred["bb_height"] = pred_size[ + num_detection, + 1, + pred["y_pos"], + pred["x_pos"], + ] + pred["start_times"] = x_coords_to_time( + pred["x_pos"].float() / params["resize_factor"], + int(sampling_rate[num_detection].item()), + params["fft_win_length"], + params["fft_overlap"], + ) + pred["end_times"] = x_coords_to_time( + (pred["x_pos"].float() + pred["bb_width"]) + / params["resize_factor"], + int(sampling_rate[num_detection].item()), + params["fft_win_length"], + params["fft_overlap"], + ) + pred["low_freqs"] = ( + pred_size[num_detection].shape[1] - pred["y_pos"].float() + ) * freq_rescale + 
params["min_freq"] + pred["high_freqs"] = ( + pred["low_freqs"] + pred["bb_height"] * freq_rescale + ) # extract the per class votes - if 'pred_class' in outputs: - pred['class_probs'] = outputs['pred_class'][ii, :, y_pos[ii, valid_inds], x_pos[ii, valid_inds]] + if pred_class is not None: + pred["class_probs"] = pred_class[ + num_detection, + :, + y_pos[num_detection, valid_inds], + x_pos[num_detection, valid_inds], + ] # extract the model features - if 'features' in outputs: - feat = outputs['features'][ii, :, y_pos[ii, valid_inds], x_pos[ii, valid_inds]].transpose(0, 1) - feat = feat.cpu().numpy().astype(np.float32) + if features is not None: + feat = features[ + num_detection, + :, + y_pos[num_detection, valid_inds], + x_pos[num_detection, valid_inds], + ].transpose(0, 1) + feat = feat.detach().numpy().astype(np.float32) feats.append(feat) # convert to numpy - for kk in pred.keys(): - pred[kk] = pred[kk].cpu().numpy().astype(np.float32) - preds.append(pred) + for key, value in pred.items(): + pred[key] = value.detach().numpy().astype(np.float32) + + preds.append(pred) # type: ignore return preds, feats -def non_max_suppression(heat, kernel_size): +def non_max_suppression( + heat: torch.Tensor, + kernel_size: Union[int, Tuple[int, int]], +): # kernel can be an int or list/tuple - if type(kernel_size) is int: + if isinstance(kernel_size, int): kernel_size_h = kernel_size kernel_size_w = kernel_size + else: + kernel_size_h, kernel_size_w = kernel_size pad_h = (kernel_size_h - 1) // 2 pad_w = (kernel_size_w - 1) // 2 - hmax = nn.functional.max_pool2d(heat, (kernel_size_h, kernel_size_w), stride=1, padding=(pad_h, pad_w)) + hmax = nn.functional.max_pool2d( + heat, (kernel_size_h, kernel_size_w), stride=1, padding=(pad_h, pad_w) + ) keep = (hmax == heat).float() return heat * keep @@ -94,7 +174,7 @@ def get_topk_scores(scores, K): topk_scores, topk_inds = torch.topk(scores.view(batch, -1), K) topk_inds = topk_inds % (height * width) - topk_ys = torch.div(topk_inds, width, rounding_mode='floor').long() - topk_xs = (topk_inds % width).long() + topk_ys = torch.div(topk_inds, width, rounding_mode="floor").long() + topk_xs = (topk_inds % width).long() return topk_scores, topk_ys, topk_xs diff --git a/bat_detect/evaluate/__init__.py b/bat_detect/evaluate/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/bat_detect/evaluate/evaluate_models.py b/bat_detect/evaluate/evaluate_models.py index 0fc8ae9..bf70f15 100644 --- a/bat_detect/evaluate/evaluate_models.py +++ b/bat_detect/evaluate/evaluate_models.py @@ -2,67 +2,74 @@ Evaluates trained model on test set and generates plots. 
""" -import numpy as np -import sys -import os +import argparse import copy import json +import os + +import numpy as np import pandas as pd from sklearn.ensemble import RandomForestClassifier -import argparse -sys.path.append('../../') -import bat_detect.utils.detector_utils as du -import bat_detect.train.train_utils as tu -import bat_detect.detector.parameters as parameters +from bat_detect.detector import parameters import bat_detect.train.evaluate as evl +import bat_detect.train.train_utils as tu +import bat_detect.utils.detector_utils as du import bat_detect.utils.plot_utils as pu def get_blank_annotation(ip_str): res = {} - res['class_name'] = '' - res['duration'] = -1 - res['id'] = ''# fileName - res['issues'] = False - res['notes'] = ip_str - res['time_exp'] = 1 - res['annotated'] = False - res['annotation'] = [] + res["class_name"] = "" + res["duration"] = -1 + res["id"] = "" # fileName + res["issues"] = False + res["notes"] = ip_str + res["time_exp"] = 1 + res["annotated"] = False + res["annotation"] = [] ann = {} - ann['class'] = '' - ann['event'] = 'Echolocation' - ann['individual'] = -1 - ann['start_time'] = -1 - ann['end_time'] = -1 - ann['low_freq'] = -1 - ann['high_freq'] = -1 - ann['confidence'] = -1 + ann["class"] = "" + ann["event"] = "Echolocation" + ann["individual"] = -1 + ann["start_time"] = -1 + ann["end_time"] = -1 + ann["low_freq"] = -1 + ann["high_freq"] = -1 + ann["confidence"] = -1 return copy.deepcopy(res), copy.deepcopy(ann) def create_genus_mapping(gt_test, preds, class_names): # rolls the per class predictions and ground truth back up to genus level - class_names_genus, cls_to_genus = np.unique([cc.split(' ')[0] for cc in class_names], return_inverse=True) - genus_to_cls_map = [np.where(np.array(cls_to_genus) == cc)[0] for cc in range(len(class_names_genus))] + class_names_genus, cls_to_genus = np.unique( + [cc.split(" ")[0] for cc in class_names], return_inverse=True + ) + genus_to_cls_map = [ + np.where(np.array(cls_to_genus) == cc)[0] + for cc in range(len(class_names_genus)) + ] gt_test_g = [] for gg in gt_test: gg_g = copy.deepcopy(gg) - inds = np.where(gg_g['class_ids']!=-1)[0] - gg_g['class_ids'][inds] = cls_to_genus[gg_g['class_ids'][inds]] + inds = np.where(gg_g["class_ids"] != -1)[0] + gg_g["class_ids"][inds] = cls_to_genus[gg_g["class_ids"][inds]] gt_test_g.append(gg_g) # note, will have entries geater than one as we are summing across the respective classes preds_g = [] for pp in preds: pp_g = copy.deepcopy(pp) - pp_g['class_probs'] = np.zeros((len(class_names_genus), pp_g['class_probs'].shape[1]), dtype=np.float32) + pp_g["class_probs"] = np.zeros( + (len(class_names_genus), pp_g["class_probs"].shape[1]), + dtype=np.float32, + ) for cc, inds in enumerate(genus_to_cls_map): - pp_g['class_probs'][cc, :] = pp['class_probs'][inds, :].sum(0) + pp_g["class_probs"][cc, :] = pp["class_probs"][inds, :].sum(0) preds_g.append(pp_g) return class_names_genus, preds_g, gt_test_g @@ -70,56 +77,70 @@ def create_genus_mapping(gt_test, preds, class_names): def load_tadarida_pred(ip_dir, dataset, file_of_interest): - res, ann = get_blank_annotation('Generated by Tadarida') + res, ann = get_blank_annotation("Generated by Tadarida") # create the annotations in the correct format - da_c = pd.read_csv(ip_dir + dataset + '/' + file_of_interest.replace('.wav', '.ta').replace('.WAV', '.ta'), sep='\t') + da_c = pd.read_csv( + ip_dir + + dataset + + "/" + + file_of_interest.replace(".wav", ".ta").replace(".WAV", ".ta"), + sep="\t", + ) res_c = copy.deepcopy(res) - 
res_c['id'] = file_of_interest - res_c['dataset'] = dataset - res_c['feats'] = da_c.iloc[:, 6:].values.astype(np.float32) + res_c["id"] = file_of_interest + res_c["dataset"] = dataset + res_c["feats"] = da_c.iloc[:, 6:].values.astype(np.float32) if da_c.shape[0] > 0: - res_c['class_name'] = '' - res_c['class_prob'] = 0.0 + res_c["class_name"] = "" + res_c["class_prob"] = 0.0 for aa in range(da_c.shape[0]): ann_c = copy.deepcopy(ann) - ann_c['class'] = 'Not Bat' # will assign to class later - ann_c['start_time'] = np.round(da_c.iloc[aa]['StTime']/1000.0 ,5) - ann_c['end_time'] = np.round((da_c.iloc[aa]['StTime'] + da_c.iloc[aa]['Dur'])/1000.0, 5) - ann_c['low_freq'] = np.round(da_c.iloc[aa]['Fmin'] * 1000.0, 2) - ann_c['high_freq'] = np.round(da_c.iloc[aa]['Fmax'] * 1000.0, 2) - ann_c['det_prob'] = 0.0 - res_c['annotation'].append(ann_c) + ann_c["class"] = "Not Bat" # will assign to class later + ann_c["start_time"] = np.round(da_c.iloc[aa]["StTime"] / 1000.0, 5) + ann_c["end_time"] = np.round( + (da_c.iloc[aa]["StTime"] + da_c.iloc[aa]["Dur"]) / 1000.0, 5 + ) + ann_c["low_freq"] = np.round(da_c.iloc[aa]["Fmin"] * 1000.0, 2) + ann_c["high_freq"] = np.round(da_c.iloc[aa]["Fmax"] * 1000.0, 2) + ann_c["det_prob"] = 0.0 + res_c["annotation"].append(ann_c) return res_c -def load_sonobat_meta(ip_dir, datasets, region_classifier, class_names, only_accepted_species=True): +def load_sonobat_meta( + ip_dir, + datasets, + region_classifier, + class_names, + only_accepted_species=True, +): sp_dict = {} for ss in class_names: - sp_key = ss.split(' ')[0][:3] + ss.split(' ')[1][:3] + sp_key = ss.split(" ")[0][:3] + ss.split(" ")[1][:3] sp_dict[sp_key] = ss - sp_dict['x'] = '' # not bat - sp_dict['Bat'] = 'Bat' + sp_dict["x"] = "" # not bat + sp_dict["Bat"] = "Bat" sonobat_meta = {} for tt in datasets: - dataset = tt['dataset_name'] - sb_ip_dir = ip_dir + dataset + '/' + region_classifier + '/' + dataset = tt["dataset_name"] + sb_ip_dir = ip_dir + dataset + "/" + region_classifier + "/" # load the call level predictions - ip_file_p = sb_ip_dir + dataset + '_Parameters_v4.5.0.txt' - #ip_file_p = sb_ip_dir + 'audio_SonoBatch_v30.0 beta.txt' - da = pd.read_csv(ip_file_p, sep='\t') + ip_file_p = sb_ip_dir + dataset + "_Parameters_v4.5.0.txt" + # ip_file_p = sb_ip_dir + 'audio_SonoBatch_v30.0 beta.txt' + da = pd.read_csv(ip_file_p, sep="\t") # load the file level predictions - ip_file_b = sb_ip_dir + dataset + '_SonoBatch_v4.5.0.txt' - #ip_file_b = sb_ip_dir + 'audio_CumulativeParameters_v30.0 beta.txt' + ip_file_b = sb_ip_dir + dataset + "_SonoBatch_v4.5.0.txt" + # ip_file_b = sb_ip_dir + 'audio_CumulativeParameters_v30.0 beta.txt' with open(ip_file_b) as f: lines = f.readlines() @@ -129,7 +150,7 @@ def load_sonobat_meta(ip_dir, datasets, region_classifier, class_names, only_acc file_res = {} for ll in lines: # note this does not seem to parse the file very well - ll_data = ll.split('\t') + ll_data = ll.split("\t") # there are sometimes many different species names per file if only_accepted_species: @@ -137,20 +158,24 @@ def load_sonobat_meta(ip_dir, datasets, region_classifier, class_names, only_acc ind = 4 else: # choosing ""~Spp" if "SppAccp" does not exist - if ll_data[4] != 'x': - ind = 4 # choosing "SppAccp", along with "Prob" here + if ll_data[4] != "x": + ind = 4 # choosing "SppAccp", along with "Prob" here else: ind = 8 # choosing "~Spp", along with "~Prob" here sp_name_1 = sp_dict[ll_data[ind]] - prob_1 = ll_data[ind+1] - if prob_1 == 'x': + prob_1 = ll_data[ind + 1] + if prob_1 == "x": prob_1 = 0.0 
- file_res[ll_data[1]] = {'id':ll_data[1], 'species_1':sp_name_1, 'prob_1':prob_1} + file_res[ll_data[1]] = { + "id": ll_data[1], + "species_1": sp_name_1, + "prob_1": prob_1, + } sonobat_meta[dataset] = {} - sonobat_meta[dataset]['file_res'] = file_res - sonobat_meta[dataset]['call_info'] = da + sonobat_meta[dataset]["file_res"] = file_res + sonobat_meta[dataset]["call_info"] = da return sonobat_meta @@ -158,34 +183,38 @@ def load_sonobat_meta(ip_dir, datasets, region_classifier, class_names, only_acc def load_sonobat_preds(dataset, id, sb_meta, set_class_name=None): # create the annotations in the correct format - res, ann = get_blank_annotation('Generated by Sonobat') + res, ann = get_blank_annotation("Generated by Sonobat") res_c = copy.deepcopy(res) - res_c['id'] = id - res_c['dataset'] = dataset + res_c["id"] = id + res_c["dataset"] = dataset - da = sb_meta[dataset]['call_info'] - da_c = da[da['Filename'] == id] + da = sb_meta[dataset]["call_info"] + da_c = da[da["Filename"] == id] - file_res = sb_meta[dataset]['file_res'] - res_c['feats'] = np.zeros((0,0)) + file_res = sb_meta[dataset]["file_res"] + res_c["feats"] = np.zeros((0, 0)) if da_c.shape[0] > 0: - res_c['class_name'] = file_res[id]['species_1'] - res_c['class_prob'] = file_res[id]['prob_1'] - res_c['feats'] = da_c.iloc[:, 3:105].values.astype(np.float32) + res_c["class_name"] = file_res[id]["species_1"] + res_c["class_prob"] = file_res[id]["prob_1"] + res_c["feats"] = da_c.iloc[:, 3:105].values.astype(np.float32) for aa in range(da_c.shape[0]): ann_c = copy.deepcopy(ann) if set_class_name is None: - ann_c['class'] = file_res[id]['species_1'] + ann_c["class"] = file_res[id]["species_1"] else: - ann_c['class'] = set_class_name - ann_c['start_time'] = np.round(da_c.iloc[aa]['TimeInFile'] / 1000.0 ,5) - ann_c['end_time'] = np.round(ann_c['start_time'] + da_c.iloc[aa]['CallDuration']/1000.0, 5) - ann_c['low_freq'] = np.round(da_c.iloc[aa]['LowFreq'] * 1000.0, 2) - ann_c['high_freq'] = np.round(da_c.iloc[aa]['HiFreq'] * 1000.0, 2) - ann_c['det_prob'] = np.round(da_c.iloc[aa]['Quality'], 3) - res_c['annotation'].append(ann_c) + ann_c["class"] = set_class_name + ann_c["start_time"] = np.round( + da_c.iloc[aa]["TimeInFile"] / 1000.0, 5 + ) + ann_c["end_time"] = np.round( + ann_c["start_time"] + da_c.iloc[aa]["CallDuration"] / 1000.0, 5 + ) + ann_c["low_freq"] = np.round(da_c.iloc[aa]["LowFreq"] * 1000.0, 2) + ann_c["high_freq"] = np.round(da_c.iloc[aa]["HiFreq"] * 1000.0, 2) + ann_c["det_prob"] = np.round(da_c.iloc[aa]["Quality"], 3) + res_c["annotation"].append(ann_c) return res_c @@ -193,8 +222,18 @@ def load_sonobat_preds(dataset, id, sb_meta, set_class_name=None): def bb_overlap(bb_g_in, bb_p_in): freq_scale = 10000000.0 # ensure that both axes are roughly the same range - bb_g = [bb_g_in['start_time'], bb_g_in['low_freq']/freq_scale, bb_g_in['end_time'], bb_g_in['high_freq']/freq_scale] - bb_p = [bb_p_in['start_time'], bb_p_in['low_freq']/freq_scale, bb_p_in['end_time'], bb_p_in['high_freq']/freq_scale] + bb_g = [ + bb_g_in["start_time"], + bb_g_in["low_freq"] / freq_scale, + bb_g_in["end_time"], + bb_g_in["high_freq"] / freq_scale, + ] + bb_p = [ + bb_p_in["start_time"], + bb_p_in["low_freq"] / freq_scale, + bb_p_in["end_time"], + bb_p_in["high_freq"] / freq_scale, + ] xA = max(bb_g[0], bb_p[0]) yA = max(bb_g[1], bb_p[1])
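Only the top of `bb_overlap` is visible in this hunk; assuming it completes with the usual intersection-over-union arithmetic, here is a standalone sketch of the idea (frequency is rescaled so both axes have comparable ranges):

```python
freq_scale = 10000000.0

gt = {"start_time": 0.10, "end_time": 0.14, "low_freq": 30000.0, "high_freq": 60000.0}
pr = {"start_time": 0.11, "end_time": 0.15, "low_freq": 35000.0, "high_freq": 65000.0}

bb_g = [gt["start_time"], gt["low_freq"] / freq_scale, gt["end_time"], gt["high_freq"] / freq_scale]
bb_p = [pr["start_time"], pr["low_freq"] / freq_scale, pr["end_time"], pr["high_freq"] / freq_scale]

# Intersection rectangle, clipped at zero when the boxes do not overlap.
inter_w = max(0.0, min(bb_g[2], bb_p[2]) - max(bb_g[0], bb_p[0]))
inter_h = max(0.0, min(bb_g[3], bb_p[3]) - max(bb_g[1], bb_p[1]))
inter = inter_w * inter_h

area_g = (bb_g[2] - bb_g[0]) * (bb_g[3] - bb_g[1])
area_p = (bb_p[2] - bb_p[0]) * (bb_p[3] - bb_p[1])
iou = inter / (area_g + area_p - inter)
print(round(iou, 3))  # 0.455 for this pair of boxes
```

@@ -220,13 +259,15 @@ def bb_overlap(bb_g_in, bb_p_in): def assign_to_gt(gt, pred, iou_thresh): # this will edit pred in place - num_preds = len(pred['annotation']) - num_gts = len(gt['annotation']) + 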
num_preds = len(pred["annotation"]) + num_gts = len(gt["annotation"]) if num_preds > 0 and num_gts > 0: iou_m = np.zeros((num_preds, num_gts)) for ii in range(num_preds): for jj in range(num_gts): - iou_m[ii, jj] = bb_overlap(gt['annotation'][jj], pred['annotation'][ii]) + iou_m[ii, jj] = bb_overlap( + gt["annotation"][jj], pred["annotation"][ii] + ) # greedily assign detections to ground truths # needs to be greater than some threshold and we cannot assign GT @@ -235,7 +276,9 @@ def assign_to_gt(gt, pred, iou_thresh): for jj in range(num_gts): max_iou = np.argmax(iou_m[:, jj]) if iou_m[max_iou, jj] > iou_thresh: - pred['annotation'][max_iou]['class'] = gt['annotation'][jj]['class'] + pred["annotation"][max_iou]["class"] = gt["annotation"][jj][ + "class" + ] iou_m[max_iou, :] = -1.0 return pred @@ -244,27 +287,39 @@ def assign_to_gt(gt, pred, iou_thresh): def parse_data(data, class_names, non_event_classes, is_pred=False): class_names_all = class_names + non_event_classes - data['class_names'] = np.array([aa['class'] for aa in data['annotation']]) - data['start_times'] = np.array([aa['start_time'] for aa in data['annotation']]) - data['end_times'] = np.array([aa['end_time'] for aa in data['annotation']]) - data['high_freqs'] = np.array([float(aa['high_freq']) for aa in data['annotation']]) - data['low_freqs'] = np.array([float(aa['low_freq']) for aa in data['annotation']]) + data["class_names"] = np.array([aa["class"] for aa in data["annotation"]]) + data["start_times"] = np.array( + [aa["start_time"] for aa in data["annotation"]] + ) + data["end_times"] = np.array([aa["end_time"] for aa in data["annotation"]]) + data["high_freqs"] = np.array( + [float(aa["high_freq"]) for aa in data["annotation"]] + ) + data["low_freqs"] = np.array( + [float(aa["low_freq"]) for aa in data["annotation"]] + ) if is_pred: # when loading predictions - data['det_probs'] = np.array([float(aa['det_prob']) for aa in data['annotation']]) - data['class_probs'] = np.zeros((len(class_names)+1, len(data['annotation']))) - data['class_ids'] = np.array([class_names_all.index(aa['class']) for aa in data['annotation']]).astype(np.int32) + data["det_probs"] = np.array( + [float(aa["det_prob"]) for aa in data["annotation"]] + ) + data["class_probs"] = np.zeros( + (len(class_names) + 1, len(data["annotation"])) + ) + data["class_ids"] = np.array( + [class_names_all.index(aa["class"]) for aa in data["annotation"]] + ).astype(np.int32) else: # when loading ground truth # if the class label is not in the set of interest then set to -1 labels = [] - for aa in data['annotation']: - if aa['class'] in class_names: - labels.append(class_names_all.index(aa['class'])) + for aa in data["annotation"]: + if aa["class"] in class_names: + labels.append(class_names_all.index(aa["class"])) else: labels.append(-1) - data['class_ids'] = np.array(labels).astype(np.int32) + data["class_ids"] = np.array(labels).astype(np.int32) return data @@ -272,12 +327,17 @@ def parse_data(data, class_names, non_event_classes, is_pred=False): def load_gt_data(datasets, events_of_interest, class_names, classes_to_ignore): gt_data = [] for dd in datasets: - print('\n' + dd['dataset_name']) - gt_dataset = tu.load_set_of_anns([dd], events_of_interest=events_of_interest, verbose=True) - gt_dataset = [parse_data(gg, class_names, classes_to_ignore, False) for gg in gt_dataset] + print("\n" + dd["dataset_name"]) + gt_dataset = tu.load_set_of_anns( + [dd], events_of_interest=events_of_interest, verbose=True + ) + gt_dataset = [ + parse_data(gg, class_names, 
classes_to_ignore, False) + for gg in gt_dataset + ] for gt in gt_dataset: - gt['dataset_name'] = dd['dataset_name'] + gt["dataset_name"] = dd["dataset_name"] gt_data.extend(gt_dataset) @@ -300,69 +360,103 @@ def train_rf_model(x_train, y_train, num_classes, seed=2001): clf = RandomForestClassifier(random_state=seed, n_jobs=-1) clf.fit(x_train, y_train) y_pred = clf.predict(x_train) - tr_acc = (y_pred==y_train).mean() - #print('Train acc', round(tr_acc*100, 2)) + tr_acc = (y_pred == y_train).mean() + # print('Train acc', round(tr_acc*100, 2)) return clf, un_train_class def eval_rf_model(clf, pred, un_train_class, num_classes): # stores the prediction in place - if pred['feats'].shape[0] > 0: - pred['class_probs'] = np.zeros((num_classes, pred['feats'].shape[0])) - pred['class_probs'][un_train_class, :] = clf.predict_proba(pred['feats']).T - pred['det_probs'] = pred['class_probs'][:-1, :].sum(0) + if pred["feats"].shape[0] > 0: + pred["class_probs"] = np.zeros((num_classes, pred["feats"].shape[0])) + pred["class_probs"][un_train_class, :] = clf.predict_proba( + pred["feats"] + ).T + pred["det_probs"] = pred["class_probs"][:-1, :].sum(0) else: - pred['class_probs'] = np.zeros((num_classes, 0)) - pred['det_probs'] = np.zeros(0) + pred["class_probs"] = np.zeros((num_classes, 0)) + pred["det_probs"] = np.zeros(0) return pred def save_summary_to_json(op_dir, mod_name, results): op = {} - op['avg_prec'] = round(results['avg_prec'], 3) - op['avg_prec_class'] = round(results['avg_prec_class'], 3) - op['top_class'] = round(results['top_class']['avg_prec'], 3) - op['file_acc'] = round(results['file_acc'], 3) - op['model'] = mod_name + op["avg_prec"] = round(results["avg_prec"], 3) + op["avg_prec_class"] = round(results["avg_prec_class"], 3) + op["top_class"] = round(results["top_class"]["avg_prec"], 3) + op["file_acc"] = round(results["file_acc"], 3) + op["model"] = mod_name - op['per_class'] = {} - for cc in results['class_pr']: - op['per_class'][cc['name']] = cc['avg_prec'] + op["per_class"] = {} + for cc in results["class_pr"]: + op["per_class"][cc["name"]] = cc["avg_prec"] - op_file_name = os.path.join(op_dir, mod_name + '_results.json') - with open(op_file_name, 'w') as da: + op_file_name = os.path.join(op_dir, mod_name + "_results.json") + with open(op_file_name, "w") as da: json.dump(op, da, indent=2)
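A standalone sketch of the random-forest baseline pattern used by `train_rf_model` and `eval_rf_model` above: fit on per-call feature vectors, then store the transposed class probabilities (synthetic data, illustrative only):

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.default_rng(2001)
x_train = rng.random((40, 8))     # 40 calls x 8 acoustic features (synthetic)
y_train = rng.integers(0, 3, 40)  # 3 classes (synthetic labels)

clf = RandomForestClassifier(random_state=2001, n_jobs=-1)
clf.fit(x_train, y_train)

x_test = rng.random((5, 8))
class_probs = clf.predict_proba(x_test).T  # transpose to classes x calls
print(class_probs.shape)                   # (3, 5), as eval_rf_model stores it
```

-def print_results(model_name, mod_str, results, op_dir, class_names, file_type, title_text=''): - print('\nResults - ' + model_name) - print('avg_prec ', round(results['avg_prec'], 3)) - print('avg_prec_class', round(results['avg_prec_class'], 3)) - print('top_class ', round(results['top_class']['avg_prec'], 3)) - print('file_acc ', round(results['file_acc'], 3)) +def print_results( + model_name, mod_str, results, op_dir, class_names, file_type, title_text="" +): + print("\nResults - " + model_name) + print("avg_prec ", round(results["avg_prec"], 3)) + print("avg_prec_class", round(results["avg_prec_class"], 3)) + print("top_class ", round(results["top_class"]["avg_prec"], 3)) + print("file_acc ", round(results["file_acc"], 3)) - print('\nSaving ' + model_name + ' results to: ' + op_dir) + print("\nSaving " + model_name + " results to: " + op_dir) save_summary_to_json(op_dir, mod_str, results) - pu.plot_pr_curve(op_dir, mod_str+'_test_all_det', mod_str+'_test_all_det', results, file_type, title_text + 'Detection PR') - pu.plot_pr_curve(op_dir, mod_str+'_test_all_top_class', mod_str+'_test_all_top_class', results['top_class'], file_type, title_text + 'Top Class') - pu.plot_pr_curve_class(op_dir, 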
mod_str+'_test_all_class', mod_str+'_test_all_class', results, file_type, title_text + 'Per-Class PR') - pu.plot_confusion_matrix(op_dir, mod_str+'_confusion', results['gt_valid_file'], results['pred_valid_file'], - results['file_acc'], class_names, True, file_type, title_text + 'Confusion Matrix') + pu.plot_pr_curve( + op_dir, + mod_str + "_test_all_det", + mod_str + "_test_all_det", + results, + file_type, + title_text + "Detection PR", + ) + pu.plot_pr_curve( + op_dir, + mod_str + "_test_all_top_class", + mod_str + "_test_all_top_class", + results["top_class"], + file_type, + title_text + "Top Class", + ) + pu.plot_pr_curve_class( + op_dir, + mod_str + "_test_all_class", + mod_str + "_test_all_class", + results, + file_type, + title_text + "Per-Class PR", + ) + pu.plot_confusion_matrix( + op_dir, + mod_str + "_confusion", + results["gt_valid_file"], + results["pred_valid_file"], + results["file_acc"], + class_names, + True, + file_type, + title_text + "Confusion Matrix", + ) def add_root_path_back(data_sets, ann_path, wav_path): for dd in data_sets: - dd['ann_path'] = os.path.join(ann_path, dd['ann_path']) - dd['wav_path'] = os.path.join(wav_path, dd['wav_path']) + dd["ann_path"] = os.path.join(ann_path, dd["ann_path"]) + dd["wav_path"] = os.path.join(wav_path, dd["wav_path"]) return data_sets def check_classes_in_train(gt_list, class_names): - num_gt_total = np.sum([gg['start_times'].shape[0] for gg in gt_list]) + num_gt_total = np.sum([gg["start_times"].shape[0] for gg in gt_list]) num_with_no_class = 0 for gt in gt_list: - for cc in gt['class_names']: + for cc in gt["class_names"]: if cc not in class_names: num_with_no_class += 1 return num_with_no_class @@ -371,195 +465,337 @@ def check_classes_in_train(gt_list, class_names): if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('op_dir', type=str, default='plots/results_compare/', - help='Output directory for plots') - parser.add_argument('data_dir', type=str, - help='Path to root of datasets') - parser.add_argument('ann_dir', type=str, - help='Path to extracted annotations') - parser.add_argument('bd_model_path', type=str, - help='Path to BatDetect model') - parser.add_argument('--test_file', type=str, default='', - help='Path to json file used for evaluation.') - parser.add_argument('--sb_ip_dir', type=str, default='', - help='Path to sonobat predictions') - parser.add_argument('--sb_region_classifier', type=str, default='south', - help='Path to sonobat predictions') - parser.add_argument('--td_ip_dir', type=str, default='', - help='Path to tadarida_D predictions') - parser.add_argument('--iou_thresh', type=float, default=0.01, - help='IOU threshold for assigning predictions to ground truth') - parser.add_argument('--file_type', type=str, default='png', - help='Type of image to save - png or pdf') - parser.add_argument('--title_text', type=str, default='', - help='Text to add as title of plots') - parser.add_argument('--rand_seed', type=int, default=2001, - help='Random seed') + parser.add_argument( + "op_dir", + type=str, + default="plots/results_compare/", + help="Output directory for plots", + ) + parser.add_argument("data_dir", type=str, help="Path to root of datasets") + parser.add_argument( + "ann_dir", type=str, help="Path to extracted annotations" + ) + parser.add_argument( + "bd_model_path", type=str, help="Path to BatDetect model" + ) + parser.add_argument( + "--test_file", + type=str, + default="", + help="Path to json file used for evaluation.", + ) + parser.add_argument( + "--sb_ip_dir", 
type=str, default="", help="Path to sonobat predictions" + ) + parser.add_argument( + "--sb_region_classifier", + type=str, + default="south", + help="Path to sonobat predictions", + ) + parser.add_argument( + "--td_ip_dir", + type=str, + default="", + help="Path to tadarida_D predictions", + ) + parser.add_argument( + "--iou_thresh", + type=float, + default=0.01, + help="IOU threshold for assigning predictions to ground truth", + ) + parser.add_argument( + "--file_type", + type=str, + default="png", + help="Type of image to save - png or pdf", + ) + parser.add_argument( + "--title_text", + type=str, + default="", + help="Text to add as title of plots", + ) + parser.add_argument( + "--rand_seed", type=int, default=2001, help="Random seed" + ) args = vars(parser.parse_args()) - np.random.seed(args['rand_seed']) - - if not os.path.isdir(args['op_dir']): - os.makedirs(args['op_dir']) + np.random.seed(args["rand_seed"]) + if not os.path.isdir(args["op_dir"]): + os.makedirs(args["op_dir"]) # load the model params_eval = parameters.get_params(False) - _, params_bd = du.load_model(args['bd_model_path']) + _, params_bd = du.load_model(args["bd_model_path"]) - class_names = params_bd['class_names'] + class_names = params_bd["class_names"] num_classes = len(class_names) + 1 # num classes plus background class - classes_to_ignore = ['Not Bat', 'Bat', 'Unknown'] - events_of_interest = ['Echolocation'] + classes_to_ignore = ["Not Bat", "Bat", "Unknown"] + events_of_interest = ["Echolocation"] # load test data - if args['test_file'] == '': + if args["test_file"] == "": # load the test files of interest from the trained model - test_sets = add_root_path_back(params_bd['test_sets'], args['ann_dir'], args['data_dir']) - test_sets = [dd for dd in test_sets if not dd['is_binary']] # exclude bat/not datasets + test_sets = add_root_path_back( + params_bd["test_sets"], args["ann_dir"], args["data_dir"] + ) + test_sets = [ + dd for dd in test_sets if not dd["is_binary"] + ] # exclude bat/not datasets else: # user specified annotation file to evaluate test_dict = {} - test_dict['dataset_name'] = args['test_file'].replace('.json', '') - test_dict['is_test'] = True - test_dict['is_binary'] = True - test_dict['ann_path'] = os.path.join(args['ann_dir'], args['test_file']) - test_dict['wav_path'] = args['data_dir'] + test_dict["dataset_name"] = args["test_file"].replace(".json", "") + test_dict["is_test"] = True + test_dict["is_binary"] = True + test_dict["ann_path"] = os.path.join(args["ann_dir"], args["test_file"]) + test_dict["wav_path"] = args["data_dir"] test_sets = [test_dict] # load the gt for the test set - gt_test = load_gt_data(test_sets, events_of_interest, class_names, classes_to_ignore) - total_num_calls = np.sum([gg['start_times'].shape[0] for gg in gt_test]) - print('\nTotal number of test files:', len(gt_test)) - print('Total number of test calls:', np.sum([gg['start_times'].shape[0] for gg in gt_test])) + gt_test = load_gt_data( + test_sets, events_of_interest, class_names, classes_to_ignore + ) + total_num_calls = np.sum([gg["start_times"].shape[0] for gg in gt_test]) + print("\nTotal number of test files:", len(gt_test)) + print( + "Total number of test calls:", + np.sum([gg["start_times"].shape[0] for gg in gt_test]), + ) # check if test contains classes not in the train set num_with_no_class = check_classes_in_train(gt_test, class_names) if total_num_calls == num_with_no_class: - print('Classes from the test set are not in the train set.') + print("Classes from the test set are not in the train 
set.") assert False # only need the train data if evaluating Sonobat or Tadarida - if args['sb_ip_dir'] != '' or args['td_ip_dir'] != '': - train_sets = add_root_path_back(params_bd['train_sets'], args['ann_dir'], args['data_dir']) - train_sets = [dd for dd in train_sets if not dd['is_binary']] # exclude bat/not datasets - gt_train = load_gt_data(train_sets, events_of_interest, class_names, classes_to_ignore) - + if args["sb_ip_dir"] != "" or args["td_ip_dir"] != "": + train_sets = add_root_path_back( + params_bd["train_sets"], args["ann_dir"], args["data_dir"] + ) + train_sets = [ + dd for dd in train_sets if not dd["is_binary"] + ] # exclude bat/not datasets + gt_train = load_gt_data( + train_sets, events_of_interest, class_names, classes_to_ignore + ) # # evaluate Sonobat by training random forest classifier # # NOTE: Sonobat may only make predictions for a subset of the files # - if args['sb_ip_dir'] != '': - sb_meta = load_sonobat_meta(args['sb_ip_dir'], train_sets + test_sets, args['sb_region_classifier'], class_names) + if args["sb_ip_dir"] != "": + sb_meta = load_sonobat_meta( + args["sb_ip_dir"], + train_sets + test_sets, + args["sb_region_classifier"], + class_names, + ) preds_sb = [] keep_inds_sb = [] for ii, gt in enumerate(gt_test): - sb_pred = load_sonobat_preds(gt['dataset_name'], gt['id'], sb_meta) - if sb_pred['class_name'] != '': - sb_pred = parse_data(sb_pred, class_names, classes_to_ignore, True) - sb_pred['class_probs'][sb_pred['class_ids'], np.arange(sb_pred['class_probs'].shape[1])] = sb_pred['det_probs'] + sb_pred = load_sonobat_preds(gt["dataset_name"], gt["id"], sb_meta) + if sb_pred["class_name"] != "": + sb_pred = parse_data( + sb_pred, class_names, classes_to_ignore, True + ) + sb_pred["class_probs"][ + sb_pred["class_ids"], + np.arange(sb_pred["class_probs"].shape[1]), + ] = sb_pred["det_probs"] preds_sb.append(sb_pred) keep_inds_sb.append(ii) - results_sb = evl.evaluate_predictions([gt_test[ii] for ii in keep_inds_sb], preds_sb, class_names, - params_eval['detection_overlap'], params_eval['ignore_start_end']) - print_results('Sonobat', 'sb', results_sb, args['op_dir'], class_names, - args['file_type'], args['title_text'] + ' - Species - ') - print('Only reporting results for', len(keep_inds_sb), 'files, out of', len(gt_test)) - + results_sb = evl.evaluate_predictions( + [gt_test[ii] for ii in keep_inds_sb], + preds_sb, + class_names, + params_eval["detection_overlap"], + params_eval["ignore_start_end"], + ) + print_results( + "Sonobat", + "sb", + results_sb, + args["op_dir"], + class_names, + args["file_type"], + args["title_text"] + " - Species - ", + ) + print( + "Only reporting results for", + len(keep_inds_sb), + "files, out of", + len(gt_test), + ) # train our own random forest on sonobat features x_train = [] y_train = [] for gt in gt_train: - pred = load_sonobat_preds(gt['dataset_name'], gt['id'], sb_meta, 'Not Bat') + pred = load_sonobat_preds( + gt["dataset_name"], gt["id"], sb_meta, "Not Bat" + ) - if len(pred['annotation']) > 0: + if len(pred["annotation"]) > 0: # compute detection overlap with ground truth to determine which are the TP detections - assign_to_gt(gt, pred, args['iou_thresh']) + assign_to_gt(gt, pred, args["iou_thresh"]) pred = parse_data(pred, class_names, classes_to_ignore, True) - x_train.append(pred['feats']) - y_train.append(pred['class_ids']) + x_train.append(pred["feats"]) + y_train.append(pred["class_ids"]) # train random forest on tadarida predictions - clf_sb, un_train_class = train_rf_model(x_train, y_train, num_classes, 
args['rand_seed']) + clf_sb, un_train_class = train_rf_model( + x_train, y_train, num_classes, args["rand_seed"] + ) # run the model on the test set preds_sb_rf = [] for gt in gt_test: - pred = load_sonobat_preds(gt['dataset_name'], gt['id'], sb_meta, 'Not Bat') + pred = load_sonobat_preds( + gt["dataset_name"], gt["id"], sb_meta, "Not Bat" + ) pred = parse_data(pred, class_names, classes_to_ignore, True) pred = eval_rf_model(clf_sb, pred, un_train_class, num_classes) preds_sb_rf.append(pred) - results_sb_rf = evl.evaluate_predictions(gt_test, preds_sb_rf, class_names, - params_eval['detection_overlap'], params_eval['ignore_start_end']) - print_results('Sonobat RF', 'sb_rf', results_sb_rf, args['op_dir'], class_names, - args['file_type'], args['title_text'] + ' - Species - ') - print('\n\nWARNING\nThis is evaluating on the full test set, but there is only dections for a subset of files\n\n') - + results_sb_rf = evl.evaluate_predictions( + gt_test, + preds_sb_rf, + class_names, + params_eval["detection_overlap"], + params_eval["ignore_start_end"], + ) + print_results( + "Sonobat RF", + "sb_rf", + results_sb_rf, + args["op_dir"], + class_names, + args["file_type"], + args["title_text"] + " - Species - ", + ) + print( + "\n\nWARNING\nThis is evaluating on the full test set, but there are only detections for a subset of files\n\n" + ) # # evaluate Tadarida-D by training random forest classifier # - if args['td_ip_dir'] != '': + if args["td_ip_dir"] != "": x_train = [] y_train = [] for gt in gt_train: - pred = load_tadarida_pred(args['td_ip_dir'], gt['dataset_name'], gt['id']) + pred = load_tadarida_pred( + args["td_ip_dir"], gt["dataset_name"], gt["id"] + ) # compute detection overlap with ground truth to determine which are the TP detections - assign_to_gt(gt, pred, args['iou_thresh']) + assign_to_gt(gt, pred, args["iou_thresh"]) pred = parse_data(pred, class_names, classes_to_ignore, True) - x_train.append(pred['feats']) - y_train.append(pred['class_ids']) + x_train.append(pred["feats"]) + y_train.append(pred["class_ids"]) # train random forest on Tadarida-D predictions - clf_td, un_train_class = train_rf_model(x_train, y_train, num_classes, args['rand_seed']) + clf_td, un_train_class = train_rf_model( + x_train, y_train, num_classes, args["rand_seed"] + ) # run the model on the test set preds_td = [] for gt in gt_test: - pred = load_tadarida_pred(args['td_ip_dir'], gt['dataset_name'], gt['id']) + pred = load_tadarida_pred( + args["td_ip_dir"], gt["dataset_name"], gt["id"] + ) pred = parse_data(pred, class_names, classes_to_ignore, True) pred = eval_rf_model(clf_td, pred, un_train_class, num_classes) preds_td.append(pred) - results_td = evl.evaluate_predictions(gt_test, preds_td, class_names, - params_eval['detection_overlap'], params_eval['ignore_start_end']) - print_results('Tadarida', 'td_rf', results_td, args['op_dir'], class_names, - args['file_type'], args['title_text'] + ' - Species - ') - + results_td = evl.evaluate_predictions( + gt_test, + preds_td, + class_names, + params_eval["detection_overlap"], + params_eval["ignore_start_end"], + ) + print_results( + "Tadarida", + "td_rf", + results_td, + args["op_dir"], + class_names, + args["file_type"], + args["title_text"] + " - Species - ", + ) # # evaluate BatDetect # - if args['bd_model_path'] != '': + if args["bd_model_path"] != "": # load model - bd_args = du.get_default_bd_args() - model, params_bd = du.load_model(args['bd_model_path']) + bd_args = du.get_default_run_config() + model, params_bd = du.load_model(args["bd_model_path"])
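For reference, a hedged sketch of how this updated API fits together outside the script. The names (`get_default_run_config`, `load_model`, `process_file`) are taken from this diff, but check `bat_detect/utils/detector_utils.py` for the exact signatures and defaults:

```python
import bat_detect.utils.detector_utils as du

# Load a trained model plus the parameters it was trained with.
model, model_params = du.load_model("bat_detect/models/Net2DFast_UK_same.pth.tar")

# Merge the default run configuration with the model's own parameters,
# mirroring the run_config construction used below.
run_config = {
    **du.get_default_run_config(),
    **model_params,
    "return_raw_preds": True,
}

# Run detection on a single recording (the path is a placeholder).
pred = du.process_file("path/to/recording.wav", model, run_config)
```

# 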
check if the class names are the same - if params_bd['class_names'] != class_names: - print('Warning: Class names are not the same as the trained model') + if params_bd["class_names"] != class_names: + print("Warning: Class names are not the same as the trained model") assert False + run_config = { + **bd_args, + **params_bd, + "return_raw_preds": True, + } + preds_bd = [] for ii, gg in enumerate(gt_test): - pred = du.process_file(gg['file_path'], model, params_bd, bd_args, return_raw_preds=True) + pred = du.process_file( + gg["file_path"], + model, + run_config, + ) preds_bd.append(pred) - results_bd = evl.evaluate_predictions(gt_test, preds_bd, class_names, - params_eval['detection_overlap'], params_eval['ignore_start_end']) - print_results('BatDetect', 'bd', results_bd, args['op_dir'], - class_names, args['file_type'], args['title_text'] + ' - Species - ') + results_bd = evl.evaluate_predictions( + gt_test, + preds_bd, + class_names, + params_eval["detection_overlap"], + params_eval["ignore_start_end"], + ) + print_results( + "BatDetect", + "bd", + results_bd, + args["op_dir"], + class_names, + args["file_type"], + args["title_text"] + " - Species - ", + ) # evaluate genus level - class_names_genus, preds_bd_g, gt_test_g = create_genus_mapping(gt_test, preds_bd, class_names) - results_bd_genus = evl.evaluate_predictions(gt_test_g, preds_bd_g, class_names_genus, - params_eval['detection_overlap'], params_eval['ignore_start_end']) - print_results('BatDetect Genus', 'bd_genus', results_bd_genus, args['op_dir'], - class_names_genus, args['file_type'], args['title_text'] + ' - Genus - ') + class_names_genus, preds_bd_g, gt_test_g = create_genus_mapping( + gt_test, preds_bd, class_names + ) + results_bd_genus = evl.evaluate_predictions( + gt_test_g, + preds_bd_g, + class_names_genus, + params_eval["detection_overlap"], + params_eval["ignore_start_end"], + ) + print_results( + "BatDetect Genus", + "bd_genus", + results_bd_genus, + args["op_dir"], + class_names_genus, + args["file_type"], + args["title_text"] + " - Genus - ", + ) diff --git a/bat_detect/finetune/__init__.py b/bat_detect/finetune/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/bat_detect/finetune/finetune_model.py b/bat_detect/finetune/finetune_model.py index 4fecc48..8988096 100644 --- a/bat_detect/finetune/finetune_model.py +++ b/bat_detect/finetune/finetune_model.py @@ -1,183 +1,321 @@ -import numpy as np -import matplotlib.pyplot as plt +import argparse +import glob +import json import os +import sys + +import matplotlib.pyplot as plt +import numpy as np import torch import torch.nn.functional as F from torch.optim.lr_scheduler import CosineAnnealingLR -import json -import argparse -import glob -import sys -sys.path.append(os.path.join('..', '..')) -import bat_detect.train.train_model as tm +sys.path.append(os.path.join("..", "..")) +import bat_detect.detector.models as models +import bat_detect.detector.parameters as parameters +import bat_detect.detector.post_process as pp import bat_detect.train.audio_dataloader as adl import bat_detect.train.evaluate as evl -import bat_detect.train.train_utils as tu import bat_detect.train.losses as losses - -import bat_detect.detector.parameters as parameters -import bat_detect.detector.models as models -import bat_detect.detector.post_process as pp -import bat_detect.utils.plot_utils as pu +import bat_detect.train.train_model as tm +import bat_detect.train.train_utils as tu import bat_detect.utils.detector_utils as du - +import bat_detect.utils.plot_utils as pu if 
__name__ == "__main__": - info_str = '\nBatDetect - Finetune Model\n' + info_str = "\nBatDetect - Finetune Model\n" print(info_str) parser = argparse.ArgumentParser() - parser.add_argument('audio_path', type=str, help='Input directory for audio') - parser.add_argument('train_ann_path', type=str, - help='Path to where train annotation file is stored') - parser.add_argument('test_ann_path', type=str, - help='Path to where test annotation file is stored') - parser.add_argument('model_path', type=str, - help='Path to pretrained model') - parser.add_argument('--op_model_name', type=str, default='', - help='Path and name for finetuned model') - parser.add_argument('--num_epochs', type=int, default=200, dest='num_epochs', - help='Number of finetuning epochs') - parser.add_argument('--finetune_only_last_layer', action='store_true', - help='Only train final layers') - parser.add_argument('--train_from_scratch', action='store_true', - help='Do not use pretrained weights') - parser.add_argument('--do_not_save_images', action='store_false', - help='Do not save images at the end of training') - parser.add_argument('--notes', type=str, default='', - help='Notes to save in text file') + parser.add_argument( + "audio_path", type=str, help="Input directory for audio" + ) + parser.add_argument( + "train_ann_path", + type=str, + help="Path to where train annotation file is stored", + ) + parser.add_argument( + "test_ann_path", + type=str, + help="Path to where test annotation file is stored", + ) + parser.add_argument("model_path", type=str, help="Path to pretrained model") + parser.add_argument( + "--op_model_name", + type=str, + default="", + help="Path and name for finetuned model", + ) + parser.add_argument( + "--num_epochs", + type=int, + default=200, + dest="num_epochs", + help="Number of finetuning epochs", + ) + parser.add_argument( + "--finetune_only_last_layer", + action="store_true", + help="Only train final layers", + ) + parser.add_argument( + "--train_from_scratch", + action="store_true", + help="Do not use pretrained weights", + ) + parser.add_argument( + "--do_not_save_images", + action="store_false", + help="Do not save images at the end of training", + ) + parser.add_argument( + "--notes", type=str, default="", help="Notes to save in text file" + ) args = vars(parser.parse_args()) - params = parameters.get_params(True, '../../experiments/') + params = parameters.get_params(True, "../../experiments/") if torch.cuda.is_available(): - params['device'] = 'cuda' + params["device"] = "cuda" else: - params['device'] = 'cpu' - print('\nNote, this will be a lot faster if you use computer with a GPU.\n') + params["device"] = "cpu" + print( + "\nNote, this will be a lot faster if you use computer with a GPU.\n" + ) - print('\nAudio directory: ' + args['audio_path']) - print('Train file: ' + args['train_ann_path']) - print('Test file: ' + args['test_ann_path']) - print('Loading model: ' + args['model_path']) + print("\nAudio directory: " + args["audio_path"]) + print("Train file: " + args["train_ann_path"]) + print("Test file: " + args["test_ann_path"]) + print("Loading model: " + args["model_path"]) - dataset_name = os.path.basename(args['train_ann_path']).replace('.json', '').replace('_TRAIN', '') + dataset_name = ( + os.path.basename(args["train_ann_path"]) + .replace(".json", "") + .replace("_TRAIN", "") + ) - if args['train_from_scratch']: - print('\nTraining model from scratch i.e. 
not using pretrained weights') - model, params_train = du.load_model(args['model_path'], False) + if args["train_from_scratch"]: + print("\nTraining model from scratch i.e. not using pretrained weights") + model, params_train = du.load_model(args["model_path"], False) else: - model, params_train = du.load_model(args['model_path'], True) - model.to(params['device']) + model, params_train = du.load_model(args["model_path"], True) + model.to(params["device"]) - params['num_epochs'] = args['num_epochs'] - if args['op_model_name'] != '': - params['model_file_name'] = args['op_model_name'] - classes_to_ignore = params['classes_to_ignore']+params['generic_class'] + params["num_epochs"] = args["num_epochs"] + if args["op_model_name"] != "": + params["model_file_name"] = args["op_model_name"] + classes_to_ignore = params["classes_to_ignore"] + params["generic_class"] # save notes file - params['notes'] = args['notes'] - if args['notes'] != '': - tu.write_notes_file(params['experiment'] + 'notes.txt', args['notes']) - + params["notes"] = args["notes"] + if args["notes"] != "": + tu.write_notes_file(params["experiment"] + "notes.txt", args["notes"]) # load train annotations train_sets = [] - train_sets.append(tu.get_blank_dataset_dict(dataset_name, False, args['train_ann_path'], args['audio_path'])) - params['train_sets'] = [tu.get_blank_dataset_dict(dataset_name, False, os.path.basename(args['train_ann_path']), args['audio_path'])] + train_sets.append( + tu.get_blank_dataset_dict( + dataset_name, False, args["train_ann_path"], args["audio_path"] + ) + ) + params["train_sets"] = [ + tu.get_blank_dataset_dict( + dataset_name, + False, + os.path.basename(args["train_ann_path"]), + args["audio_path"], + ) + ] - print('\nTrain set:') - data_train, params['class_names'], params['class_inv_freq'] = \ - tu.load_set_of_anns(train_sets, classes_to_ignore, params['events_of_interest']) - print('Number of files', len(data_train)) + print("\nTrain set:") + ( + data_train, + params["class_names"], + params["class_inv_freq"], + ) = tu.load_set_of_anns( + train_sets, classes_to_ignore, params["events_of_interest"] + ) + print("Number of files", len(data_train)) - params['genus_names'], params['genus_mapping'] = tu.get_genus_mapping(params['class_names']) - params['class_names_short'] = tu.get_short_class_names(params['class_names']) + params["genus_names"], params["genus_mapping"] = tu.get_genus_mapping( + params["class_names"] + ) + params["class_names_short"] = tu.get_short_class_names( + params["class_names"] + ) # load test annotations test_sets = [] - test_sets.append(tu.get_blank_dataset_dict(dataset_name, True, args['test_ann_path'], args['audio_path'])) - params['test_sets'] = [tu.get_blank_dataset_dict(dataset_name, True, os.path.basename(args['test_ann_path']), args['audio_path'])] + test_sets.append( + tu.get_blank_dataset_dict( + dataset_name, True, args["test_ann_path"], args["audio_path"] + ) + ) + params["test_sets"] = [ + tu.get_blank_dataset_dict( + dataset_name, + True, + os.path.basename(args["test_ann_path"]), + args["audio_path"], + ) + ] - print('\nTest set:') - data_test, _, _ = tu.load_set_of_anns(test_sets, classes_to_ignore, params['events_of_interest']) - print('Number of files', len(data_test)) + print("\nTest set:") + data_test, _, _ = tu.load_set_of_anns( + test_sets, classes_to_ignore, params["events_of_interest"] + ) + print("Number of files", len(data_test)) # train loader train_dataset = adl.AudioLoader(data_train, params, is_train=True) - train_loader = 
torch.utils.data.DataLoader(train_dataset, batch_size=params['batch_size'], - shuffle=True, num_workers=params['num_workers'], pin_memory=True) + train_loader = torch.utils.data.DataLoader( + train_dataset, + batch_size=params["batch_size"], + shuffle=True, + num_workers=params["num_workers"], + pin_memory=True, + ) # test loader - batch size of one because of variable file length test_dataset = adl.AudioLoader(data_test, params, is_train=False) - test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, - shuffle=False, num_workers=params['num_workers'], pin_memory=True) + test_loader = torch.utils.data.DataLoader( + test_dataset, + batch_size=1, + shuffle=False, + num_workers=params["num_workers"], + pin_memory=True, + ) inputs_train = next(iter(train_loader)) - params['ip_height'] = inputs_train['spec'].shape[2] - print('\ntrain batch size :', inputs_train['spec'].shape) + params["ip_height"] = inputs_train["spec"].shape[2] + print("\ntrain batch size :", inputs_train["spec"].shape) - assert(params_train['model_name'] == 'Net2DFast') - print('\n\nSOME hyperparams need to be the same as the loaded model (e.g. FFT) - currently they are getting overwritten.\n\n') + assert params_train["model_name"] == "Net2DFast" + print( + "\n\nSOME hyperparams need to be the same as the loaded model (e.g. FFT) - currently they are getting overwritten.\n\n" + ) # set the number of output classes num_filts = model.conv_classes_op.in_channels k_size = model.conv_classes_op.kernel_size pad = model.conv_classes_op.padding - model.conv_classes_op = torch.nn.Conv2d(num_filts, len(params['class_names'])+1, kernel_size=k_size, padding=pad) - model.conv_classes_op.to(params['device']) + model.conv_classes_op = torch.nn.Conv2d( + num_filts, + len(params["class_names"]) + 1, + kernel_size=k_size, + padding=pad, + ) + model.conv_classes_op.to(params["device"]) - if args['finetune_only_last_layer']: - print('\nOnly finetuning the final layers.\n') - train_layers_i = ['conv_classes', 'conv_classes_op', 'conv_size', 'conv_size_op'] - train_layers = [tt + '.weight' for tt in train_layers_i] + [tt + '.bias' for tt in train_layers_i] + if args["finetune_only_last_layer"]: + print("\nOnly finetuning the final layers.\n") + train_layers_i = [ + "conv_classes", + "conv_classes_op", + "conv_size", + "conv_size_op", + ] + train_layers = [tt + ".weight" for tt in train_layers_i] + [ + tt + ".bias" for tt in train_layers_i + ] for name, param in model.named_parameters(): if name in train_layers: param.requires_grad = True else: param.requires_grad = False - optimizer = torch.optim.Adam(model.parameters(), lr=params['lr']) - scheduler = CosineAnnealingLR(optimizer, params['num_epochs'] * len(train_loader)) - if params['train_loss'] == 'mse': + optimizer = torch.optim.Adam(model.parameters(), lr=params["lr"]) + scheduler = CosineAnnealingLR( + optimizer, params["num_epochs"] * len(train_loader) + ) + if params["train_loss"] == "mse": det_criterion = losses.mse_loss - elif params['train_loss'] == 'focal': + elif params["train_loss"] == "focal": det_criterion = losses.focal_loss # plotting - train_plt_ls = pu.LossPlotter(params['experiment'] + 'train_loss.png', params['num_epochs']+1, - ['train_loss'], None, None, ['epoch', 'train_loss'], logy=True) - test_plt_ls = pu.LossPlotter(params['experiment'] + 'test_loss.png', params['num_epochs']+1, - ['test_loss'], None, None, ['epoch', 'test_loss'], logy=True) - test_plt = pu.LossPlotter(params['experiment'] + 'test.png', params['num_epochs']+1, - ['avg_prec', 'rec_at_x', 
'avg_prec_class', 'file_acc', 'top_class'], [0,1], None, ['epoch', '']) - test_plt_class = pu.LossPlotter(params['experiment'] + 'test_avg_prec.png', params['num_epochs']+1, - params['class_names_short'], [0,1], params['class_names_short'], ['epoch', 'avg_prec']) + train_plt_ls = pu.LossPlotter( + params["experiment"] + "train_loss.png", + params["num_epochs"] + 1, + ["train_loss"], + None, + None, + ["epoch", "train_loss"], + logy=True, + ) + test_plt_ls = pu.LossPlotter( + params["experiment"] + "test_loss.png", + params["num_epochs"] + 1, + ["test_loss"], + None, + None, + ["epoch", "test_loss"], + logy=True, + ) + test_plt = pu.LossPlotter( + params["experiment"] + "test.png", + params["num_epochs"] + 1, + ["avg_prec", "rec_at_x", "avg_prec_class", "file_acc", "top_class"], + [0, 1], + None, + ["epoch", ""], + ) + test_plt_class = pu.LossPlotter( + params["experiment"] + "test_avg_prec.png", + params["num_epochs"] + 1, + params["class_names_short"], + [0, 1], + params["class_names_short"], + ["epoch", "avg_prec"], + ) # main train loop - for epoch in range(0, params['num_epochs']+1): + for epoch in range(0, params["num_epochs"] + 1): - train_loss = tm.train(model, epoch, train_loader, det_criterion, optimizer, scheduler, params) - train_plt_ls.update_and_save(epoch, [train_loss['train_loss']]) + train_loss = tm.train( + model, + epoch, + train_loader, + det_criterion, + optimizer, + scheduler, + params, + ) + train_plt_ls.update_and_save(epoch, [train_loss["train_loss"]]) - if epoch % params['num_eval_epochs'] == 0: + if epoch % params["num_eval_epochs"] == 0: # detection accuracy on test set - test_res, test_loss = tm.test(model, epoch, test_loader, det_criterion, params) - test_plt_ls.update_and_save(epoch, [test_loss['test_loss']]) - test_plt.update_and_save(epoch, [test_res['avg_prec'], test_res['rec_at_x'], - test_res['avg_prec_class'], test_res['file_acc'], test_res['top_class']['avg_prec']]) - test_plt_class.update_and_save(epoch, [rs['avg_prec'] for rs in test_res['class_pr']]) - pu.plot_pr_curve_class(params['experiment'] , 'test_pr', 'test_pr', test_res) + test_res, test_loss = tm.test( + model, epoch, test_loader, det_criterion, params + ) + test_plt_ls.update_and_save(epoch, [test_loss["test_loss"]]) + test_plt.update_and_save( + epoch, + [ + test_res["avg_prec"], + test_res["rec_at_x"], + test_res["avg_prec_class"], + test_res["file_acc"], + test_res["top_class"]["avg_prec"], + ], + ) + test_plt_class.update_and_save( + epoch, [rs["avg_prec"] for rs in test_res["class_pr"]] + ) + pu.plot_pr_curve_class( + params["experiment"], "test_pr", "test_pr", test_res + ) # save finetuned model - print('saving model to: ' + params['model_file_name']) - op_state = {'epoch': epoch + 1, - 'state_dict': model.state_dict(), - 'params' : params} - torch.save(op_state, params['model_file_name']) - + print("saving model to: " + params["model_file_name"]) + op_state = { + "epoch": epoch + 1, + "state_dict": model.state_dict(), + "params": params, + } + torch.save(op_state, params["model_file_name"]) # save an image with associated prediction for each batch in the test set - if not args['do_not_save_images']: + if not args["do_not_save_images"]: tm.save_images_batch(model, test_loader, params) diff --git a/bat_detect/finetune/prep_data_finetune.py b/bat_detect/finetune/prep_data_finetune.py index 3e86cd4..d8d1df8 100644 --- a/bat_detect/finetune/prep_data_finetune.py +++ b/bat_detect/finetune/prep_data_finetune.py @@ -1,32 +1,33 @@ -import numpy as np import argparse -import os import json - 
+import os import sys -sys.path.append(os.path.join('..', '..')) + +import numpy as np + +sys.path.append(os.path.join("..", "..")) import bat_detect.train.train_utils as tu def print_dataset_stats(data, split_name, classes_to_ignore): - print('\nSplit:', split_name) - print('Num files:', len(data)) + print("\nSplit:", split_name) + print("Num files:", len(data)) class_cnts = {} for dd in data: - for aa in dd['annotation']: - if aa['class'] not in classes_to_ignore: - if aa['class'] in class_cnts: - class_cnts[aa['class']] += 1 + for aa in dd["annotation"]: + if aa["class"] not in classes_to_ignore: + if aa["class"] in class_cnts: + class_cnts[aa["class"]] += 1 else: - class_cnts[aa['class']] = 1 + class_cnts[aa["class"]] = 1 if len(class_cnts) == 0: class_names = [] else: class_names = np.sort([*class_cnts]).tolist() - print('Class count:') + print("Class count:") str_len = np.max([len(cc) for cc in class_names]) + 5 for ii, cc in enumerate(class_names): @@ -41,111 +42,165 @@ def load_file_names(file_name): with open(file_name) as da: files = [line.rstrip() for line in da.readlines()] for ff in files: - if ff.lower()[-3:] != 'wav': - print('Error: Filenames need to end in .wav - ', ff) - assert(False) + if ff.lower()[-3:] != "wav": + print("Error: Filenames need to end in .wav - ", ff) + assert False else: - print('Error: Input file not found - ', file_name) - assert(False) + print("Error: Input file not found - ", file_name) + assert False return files if __name__ == "__main__": - info_str = '\nBatDetect - Prepare Data for Finetuning\n' + info_str = "\nBatDetect - Prepare Data for Finetuning\n" print(info_str) parser = argparse.ArgumentParser() - parser.add_argument('dataset_name', type=str, help='Name to call your dataset') - parser.add_argument('audio_dir', type=str, help='Input directory for audio') - parser.add_argument('ann_dir', type=str, help='Input directory for where the audio annotations are stored') - parser.add_argument('op_dir', type=str, help='Path where the train and test splits will be stored') - parser.add_argument('--percent_val', type=float, default=0.20, - help='Hold out this much data for validation. Should be number between 0 and 1') - parser.add_argument('--rand_seed', type=int, default=2001, - help='Random seed used for creating the validation split') - parser.add_argument('--train_file', type=str, default='', - help='Text file where each line is a wav file in train split') - parser.add_argument('--test_file', type=str, default='', - help='Text file where each line is a wav file in test split') - parser.add_argument('--input_class_names', type=str, default='', - help='Specify names of classes that you want to change. Separate with ";"') - parser.add_argument('--output_class_names', type=str, default='', - help='New class names to use instead. One to one mapping with "--input_class_names". \ - Separate with ";"') + parser.add_argument( + "dataset_name", type=str, help="Name to call your dataset" + ) + parser.add_argument("audio_dir", type=str, help="Input directory for audio") + parser.add_argument( + "ann_dir", + type=str, + help="Input directory for where the audio annotations are stored", + ) + parser.add_argument( + "op_dir", + type=str, + help="Path where the train and test splits will be stored", + ) + parser.add_argument( + "--percent_val", + type=float, + default=0.20, + help="Hold out this much data for validation. 
Should be a number between 0 and 1", + ) + parser.add_argument( + "--rand_seed", + type=int, + default=2001, + help="Random seed used for creating the validation split", + ) + parser.add_argument( + "--train_file", + type=str, + default="", + help="Text file where each line is a wav file in train split", + ) + parser.add_argument( + "--test_file", + type=str, + default="", + help="Text file where each line is a wav file in test split", + ) + parser.add_argument( + "--input_class_names", + type=str, + default="", + help='Specify names of classes that you want to change. Separate with ";"', + ) + parser.add_argument( + "--output_class_names", + type=str, + default="", + help='New class names to use instead. One-to-one mapping with "--input_class_names". \ + Separate with ";"', + ) args = vars(parser.parse_args()) + np.random.seed(args["rand_seed"]) - np.random.seed(args['rand_seed']) + classes_to_ignore = ["", " ", "Unknown", "Not Bat"] + generic_class = ["Bat"] + events_of_interest = ["Echolocation"] - classes_to_ignore = ['', ' ', 'Unknown', 'Not Bat'] - generic_class = ['Bat'] - events_of_interest = ['Echolocation'] - - if args['input_class_names'] != '' and args['output_class_names'] != '': + if args["input_class_names"] != "" and args["output_class_names"] != "": # change the names of the classes - ip_names = args['input_class_names'].split(';') - op_names = args['output_class_names'].split(';') + ip_names = args["input_class_names"].split(";") + op_names = args["output_class_names"].split(";") name_dict = dict(zip(ip_names, op_names)) else: name_dict = False # load annotations - data_all, _, _ = tu.load_set_of_anns({'ann_path': args['ann_dir'], 'wav_path': args['audio_dir']}, - classes_to_ignore, events_of_interest, False, False, - list_of_anns=True, filter_issues=True, name_replace=name_dict) + data_all, _, _ = tu.load_set_of_anns( + {"ann_path": args["ann_dir"], "wav_path": args["audio_dir"]}, + classes_to_ignore, + events_of_interest, + False, + False, + list_of_anns=True, + filter_issues=True, + name_replace=name_dict, + ) - print('Dataset name: ' + args['dataset_name']) - print('Audio directory: ' + args['audio_dir']) - print('Annotation directory: ' + args['ann_dir']) - print('Ouput directory: ' + args['op_dir']) - print('Num annotated files: ' + str(len(data_all))) + print("Dataset name: " + args["dataset_name"]) + print("Audio directory: " + args["audio_dir"]) + print("Annotation directory: " + args["ann_dir"]) + print("Output directory: " + args["op_dir"]) + print("Num annotated files: " + str(len(data_all))) - if args['train_file'] != '' and args['test_file'] != '': + if args["train_file"] != "" and args["test_file"] != "": # user has specified the train / test split - train_files = load_file_names(args['train_file']) - test_files = load_file_names(args['test_file']) - file_names_all = [dd['id'] for dd in data_all] - train_inds = [file_names_all.index(ff) for ff in train_files if ff in file_names_all] - test_inds = [file_names_all.index(ff) for ff in test_files if ff in file_names_all] + train_files = load_file_names(args["train_file"]) + test_files = load_file_names(args["test_file"]) + file_names_all = [dd["id"] for dd in data_all] + train_inds = [ + file_names_all.index(ff) + for ff in train_files + if ff in file_names_all + ] + test_inds = [ + file_names_all.index(ff) + for ff in test_files + if ff in file_names_all + ] else: # split the data into train and test at the file level num_exs = len(data_all) - test_inds = np.random.choice(np.arange(num_exs), 
int(num_exs*args['percent_val']), replace=False) + test_inds = np.random.choice( + np.arange(num_exs), + int(num_exs * args["percent_val"]), + replace=False, + ) test_inds = np.sort(test_inds) train_inds = np.setdiff1d(np.arange(num_exs), test_inds) data_train = [data_all[ii] for ii in train_inds] data_test = [data_all[ii] for ii in test_inds] - if not os.path.isdir(args['op_dir']): - os.makedirs(args['op_dir']) - op_name = os.path.join(args['op_dir'], args['dataset_name']) - op_name_train = op_name + '_TRAIN.json' - op_name_test = op_name + '_TEST.json' + if not os.path.isdir(args["op_dir"]): + os.makedirs(args["op_dir"]) + op_name = os.path.join(args["op_dir"], args["dataset_name"]) + op_name_train = op_name + "_TRAIN.json" + op_name_test = op_name + "_TEST.json" - class_un_train = print_dataset_stats(data_train, 'Train', classes_to_ignore) - class_un_test = print_dataset_stats(data_test, 'Test', classes_to_ignore) + class_un_train = print_dataset_stats(data_train, "Train", classes_to_ignore) + class_un_test = print_dataset_stats(data_test, "Test", classes_to_ignore) if len(data_train) > 0 and len(data_test) > 0: if class_un_train != class_un_test: - print('\nError: some classes are not in both the training and test sets.\ - \nTry a different random seed "--rand_seed".') + print( + '\nError: some classes are not in both the training and test sets.\ + \nTry a different random seed "--rand_seed".' + ) assert False - print('\n') + print("\n") if len(data_train) == 0: - print('No train annotations to save') + print("No train annotations to save") else: - print('Saving: ', op_name_train) - with open(op_name_train, 'w') as da: + print("Saving: ", op_name_train) + with open(op_name_train, "w") as da: json.dump(data_train, da, indent=2) if len(data_test) == 0: - print('No test annotations to save') + print("No test annotations to save") else: - print('Saving: ', op_name_test) - with open(op_name_test, 'w') as da: + print("Saving: ", op_name_test) + with open(op_name_test, "w") as da: json.dump(data_test, da, indent=2) diff --git a/models/Net2DFast_UK_same.pth.tar b/bat_detect/models/Net2DFast_UK_same.pth.tar similarity index 100% rename from models/Net2DFast_UK_same.pth.tar rename to bat_detect/models/Net2DFast_UK_same.pth.tar diff --git a/models/readme.md b/bat_detect/models/readme.md similarity index 100% rename from models/readme.md rename to bat_detect/models/readme.md diff --git a/bat_detect/train/audio_dataloader.py b/bat_detect/train/audio_dataloader.py index a36ec0b..6d4d9d8 100644 --- a/bat_detect/train/audio_dataloader.py +++ b/bat_detect/train/audio_dataloader.py @@ -1,71 +1,144 @@ -import torch -import random -import numpy as np import copy +from typing import Tuple + import librosa +import numpy as np +import torch import torch.nn.functional as F import torchaudio -import os -import sys -sys.path.append(os.path.join('..', '..')) import bat_detect.utils.audio_utils as au +from bat_detect.types import AnnotationGroup, HeatmapParameters -def generate_gt_heatmaps(spec_op_shape, sampling_rate, ann, params): +def generate_gt_heatmaps( + spec_op_shape: Tuple[int, int], + sampling_rate: int, + ann: AnnotationGroup, + params: HeatmapParameters, +) -> Tuple[np.ndarray, np.ndarray, np.ndarray, AnnotationGroup]: + """Generate ground truth heatmaps from annotations. + + Parameters + ---------- + spec_op_shape : Tuple[int, int] + Shape of the input spectrogram. + sampling_rate : int + Sampling rate of the input audio in Hz. 
+ ann : AnnotationGroup + Dictionary containing the annotation information. + params : HeatmapParameters + Parameters controlling the generation of the heatmaps. + + Returns + ------- + + y_2d_det : np.ndarray + 2D heatmap of the presence of an event. + + y_2d_size : np.ndarray + 2D heatmap of the size of the bounding box associated with the event. + + y_2d_classes : np.ndarray + 3D array containing the ground-truth class probabilities for each + pixel. + + ann_aug : AnnotationGroup + A dictionary containing the annotation information of the + annotations that are within the input spectrogram, augmented with + the x and y indices of their pixel location in the input spectrogram. + + """ # spec may be resized on input into the network - num_classes = len(params['class_names']) - op_height = spec_op_shape[0] - op_width = spec_op_shape[1] - freq_per_bin = (params['max_freq'] - params['min_freq']) / op_height + num_classes = len(params["class_names"]) + op_height = spec_op_shape[0] + op_width = spec_op_shape[1] + freq_per_bin = (params["max_freq"] - params["min_freq"]) / op_height # start and end times - x_pos_start = au.time_to_x_coords(ann['start_times'], sampling_rate, - params['fft_win_length'], params['fft_overlap']) - x_pos_start = (params['resize_factor']*x_pos_start).astype(np.int) - x_pos_end = au.time_to_x_coords(ann['end_times'], sampling_rate, - params['fft_win_length'], params['fft_overlap']) - x_pos_end = (params['resize_factor']*x_pos_end).astype(np.int) + x_pos_start = au.time_to_x_coords( + ann["start_times"], + sampling_rate, + params["fft_win_length"], + params["fft_overlap"], + ) + x_pos_start = (params["resize_factor"] * x_pos_start).astype(np.int) + x_pos_end = au.time_to_x_coords( + ann["end_times"], + sampling_rate, + params["fft_win_length"], + params["fft_overlap"], + ) + x_pos_end = (params["resize_factor"] * x_pos_end).astype(np.int) # location on y axis i.e. 
frequency - y_pos_low = (ann['low_freqs'] - params['min_freq']) / freq_per_bin - y_pos_low = (op_height - y_pos_low).astype(np.int) - y_pos_high = (ann['high_freqs'] - params['min_freq']) / freq_per_bin + y_pos_low = (ann["low_freqs"] - params["min_freq"]) / freq_per_bin + y_pos_low = (op_height - y_pos_low).astype(np.int) + y_pos_high = (ann["high_freqs"] - params["min_freq"]) / freq_per_bin y_pos_high = (op_height - y_pos_high).astype(np.int) - bb_widths = x_pos_end - x_pos_start - bb_heights = (y_pos_low - y_pos_high) + bb_widths = x_pos_end - x_pos_start + bb_heights = y_pos_low - y_pos_high - valid_inds = np.where((x_pos_start >= 0) & (x_pos_start < op_width) & - (y_pos_low >= 0) & (y_pos_low < (op_height-1)))[0] + # Only include annotations that are within the input spectrogram + valid_inds = np.where( + (x_pos_start >= 0) + & (x_pos_start < op_width) + & (y_pos_low >= 0) + & (y_pos_low < (op_height - 1)) + )[0] - ann_aug = {} - ann_aug['x_inds'] = x_pos_start[valid_inds] - ann_aug['y_inds'] = y_pos_low[valid_inds] - keys = ['start_times', 'end_times', 'high_freqs', 'low_freqs', 'class_ids', 'individual_ids'] - for kk in keys: - ann_aug[kk] = ann[kk][valid_inds] + ann_aug: AnnotationGroup = { + "start_times": ann["start_times"][valid_inds], + "end_times": ann["end_times"][valid_inds], + "high_freqs": ann["high_freqs"][valid_inds], + "low_freqs": ann["low_freqs"][valid_inds], + "class_ids": ann["class_ids"][valid_inds], + "individual_ids": ann["individual_ids"][valid_inds], + } + ann_aug["x_inds"] = x_pos_start[valid_inds] + ann_aug["y_inds"] = y_pos_low[valid_inds] + # keys = [ + # "start_times", + # "end_times", + # "high_freqs", + # "low_freqs", + # "class_ids", + # "individual_ids", + # ] + # for kk in keys: + # ann_aug[kk] = ann[kk][valid_inds] # if the number of calls is only 1, then it is unique # TODO would be better if we found these unique calls at the merging stage - if len(ann_aug['individual_ids']) == 1: - ann_aug['individual_ids'][0] = 0 + if len(ann_aug["individual_ids"]) == 1: + ann_aug["individual_ids"][0] = 0 - y_2d_det = np.zeros((1, op_height, op_width), dtype=np.float32) + y_2d_det = np.zeros((1, op_height, op_width), dtype=np.float32) y_2d_size = np.zeros((2, op_height, op_width), dtype=np.float32) # num classes and "background" class - y_2d_classes = np.zeros((num_classes+1, op_height, op_width), dtype=np.float32) + y_2d_classes: np.ndarray = np.zeros( + (num_classes + 1, op_height, op_width), dtype=np.float32 + ) # create 2D ground truth heatmaps for ii in valid_inds: - draw_gaussian(y_2d_det[0,:], (x_pos_start[ii], y_pos_low[ii]), params['target_sigma']) - #draw_gaussian(y_2d_det[0,:], (x_pos_start[ii], y_pos_low[ii]), params['target_sigma'], params['target_sigma']*2) + draw_gaussian( + y_2d_det[0, :], + (x_pos_start[ii], y_pos_low[ii]), + params["target_sigma"], + ) + # draw_gaussian(y_2d_det[0,:], (x_pos_start[ii], y_pos_low[ii]), params['target_sigma'], params['target_sigma']*2) y_2d_size[0, y_pos_low[ii], x_pos_start[ii]] = bb_widths[ii] y_2d_size[1, y_pos_low[ii], x_pos_start[ii]] = bb_heights[ii] - cls_id = ann['class_ids'][ii] + cls_id = ann["class_ids"][ii] if cls_id > -1: - draw_gaussian(y_2d_classes[cls_id, :], (x_pos_start[ii], y_pos_low[ii]), params['target_sigma']) - #draw_gaussian(y_2d_classes[cls_id, :], (x_pos_start[ii], y_pos_low[ii]), params['target_sigma'], params['target_sigma']*2) + draw_gaussian( + y_2d_classes[cls_id, :], + (x_pos_start[ii], y_pos_low[ii]), + params["target_sigma"], + ) + # draw_gaussian(y_2d_classes[cls_id, :], 
(x_pos_start[ii], y_pos_low[ii]), params['target_sigma'], params['target_sigma']*2) # be careful as this will have a 1.0 in places where we have an event but don't know the gt class # this will be masked in training anyway @@ -96,20 +169,24 @@ def draw_gaussian(heatmap, center, sigmax, sigmay=None): x = np.arange(0, size, 1, np.float32) y = x[:, np.newaxis] x0 = y0 = size // 2 - #g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2)) - g = np.exp(- ((x - x0) ** 2)/(2 * sigmax ** 2) - ((y - y0) ** 2)/(2 * sigmay ** 2)) + # g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2)) + g = np.exp( + -((x - x0) ** 2) / (2 * sigmax**2) + - ((y - y0) ** 2) / (2 * sigmay**2) + ) g_x = max(0, -ul[0]), min(br[0], h) - ul[0] g_y = max(0, -ul[1]), min(br[1], w) - ul[1] img_x = max(0, ul[0]), min(br[0], h) img_y = max(0, ul[1]), min(br[1], w) - heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]] = np.maximum( - heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]], - g[g_y[0]:g_y[1], g_x[0]:g_x[1]]) + heatmap[img_y[0] : img_y[1], img_x[0] : img_x[1]] = np.maximum( + heatmap[img_y[0] : img_y[1], img_x[0] : img_x[1]], + g[g_y[0] : g_y[1], g_x[0] : g_x[1]], + ) return True def pad_aray(ip_array, pad_size): - return np.hstack((ip_array, np.ones(pad_size, dtype=np.int)*-1)) + return np.hstack((ip_array, np.ones(pad_size, dtype=np.int) * -1)) def warp_spec_aug(spec, ann, return_spec_for_viz, params): @@ -121,24 +198,37 @@ def warp_spec_aug(spec, ann, return_spec_for_viz, params): if return_spec_for_viz: assert False - delta = params['stretch_squeeze_delta'] + delta = params["stretch_squeeze_delta"] op_size = (spec.shape[1], spec.shape[2]) - resize_fract_r = np.random.rand()*delta*2 - delta + 1.0 - resize_amt = int(spec.shape[2]*resize_fract_r) + resize_fract_r = np.random.rand() * delta * 2 - delta + 1.0 + resize_amt = int(spec.shape[2] * resize_fract_r) if resize_amt >= spec.shape[2]: - spec_r = torch.cat((spec, torch.zeros((1, spec.shape[1], resize_amt-spec.shape[2]), dtype=spec.dtype)), 2) + spec_r = torch.cat( + ( + spec, + torch.zeros( + (1, spec.shape[1], resize_amt - spec.shape[2]), + dtype=spec.dtype, + ), + ), + 2, + ) else: spec_r = spec[:, :, :resize_amt] - spec = F.interpolate(spec_r.unsqueeze(0), size=op_size, mode='bilinear', align_corners=False).squeeze(0) - ann['start_times'] *= (1.0/resize_fract_r) - ann['end_times'] *= (1.0/resize_fract_r) + spec = F.interpolate( + spec_r.unsqueeze(0), size=op_size, mode="bilinear", align_corners=False + ).squeeze(0) + ann["start_times"] *= 1.0 / resize_fract_r + ann["end_times"] *= 1.0 / resize_fract_r return spec def mask_time_aug(spec, params): # Mask out a random block of time - repeat up to 3 times # SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition - fm = torchaudio.transforms.TimeMasking(int(spec.shape[1]*params['mask_max_time_perc'])) + fm = torchaudio.transforms.TimeMasking( + int(spec.shape[1] * params["mask_max_time_perc"]) + ) for ii in range(np.random.randint(1, 4)): spec = fm(spec) return spec @@ -147,40 +237,65 @@ def mask_time_aug(spec, params): def mask_freq_aug(spec, params): # Mask out a random frequency range - repeat up to 3 times # SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition - fm = torchaudio.transforms.FrequencyMasking(int(spec.shape[1]*params['mask_max_freq_perc'])) + fm = torchaudio.transforms.FrequencyMasking( + int(spec.shape[1] * params["mask_max_freq_perc"]) + ) for ii in range(np.random.randint(1, 4)): spec = fm(spec) return spec def scale_vol_aug(spec, params): - return 
spec * np.random.random()*params['spec_amp_scaling'] + return spec * np.random.random() * params["spec_amp_scaling"] def echo_aug(audio, sampling_rate, params): - sample_offset = int(params['echo_max_delay']*np.random.random()*sampling_rate) + 1 - audio[:-sample_offset] += np.random.random()*audio[sample_offset:] + sample_offset = ( + int(params["echo_max_delay"] * np.random.random() * sampling_rate) + 1 + ) + audio[:-sample_offset] += np.random.random() * audio[sample_offset:] return audio def resample_aug(audio, sampling_rate, params): sampling_rate_old = sampling_rate - sampling_rate = np.random.choice(params['aug_sampling_rates']) - audio = librosa.resample(audio, sampling_rate_old, sampling_rate, res_type='polyphase') + sampling_rate = np.random.choice(params["aug_sampling_rates"]) + audio = librosa.resample( + audio, + orig_sr=sampling_rate_old, + target_sr=sampling_rate, + res_type="polyphase", + ) - audio = au.pad_audio(audio, sampling_rate, params['fft_win_length'], - params['fft_overlap'], params['resize_factor'], - params['spec_divide_factor'], params['spec_train_width']) + audio = au.pad_audio( + audio, + sampling_rate, + params["fft_win_length"], + params["fft_overlap"], + params["resize_factor"], + params["spec_divide_factor"], + params["spec_train_width"], + ) duration = audio.shape[0] / float(sampling_rate) return audio, sampling_rate, duration def resample_audio(num_samples, sampling_rate, audio2, sampling_rate2): if sampling_rate != sampling_rate2: - audio2 = librosa.resample(audio2, sampling_rate2, sampling_rate, res_type='polyphase') + audio2 = librosa.resample( + audio2, + orig_sr=sampling_rate2, + target_sr=sampling_rate, + res_type="polyphase", + ) sampling_rate2 = sampling_rate if audio2.shape[0] < num_samples: - audio2 = np.hstack((audio2, np.zeros((num_samples-audio2.shape[0]), dtype=audio2.dtype))) + audio2 = np.hstack( + ( + audio2, + np.zeros((num_samples - audio2.shape[0]), dtype=audio2.dtype), + ) + ) elif audio2.shape[0] > num_samples: audio2 = audio2[:num_samples] return audio2, sampling_rate2 @@ -189,26 +304,32 @@ def resample_audio(num_samples, sampling_rate, audio2, sampling_rate2): def combine_audio_aug(audio, sampling_rate, ann, audio2, sampling_rate2, ann2): # resample so they are the same - audio2, sampling_rate2 = resample_audio(audio.shape[0], sampling_rate, audio2, sampling_rate2) + audio2, sampling_rate2 = resample_audio( + audio.shape[0], sampling_rate, audio2, sampling_rate2 + ) # # set mean and std to be the same # audio2 = (audio2 - audio2.mean()) # audio2 = (audio2/audio2.std())*audio.std() # audio2 = audio2 + audio.mean() - if ann['annotated'] and (ann2['annotated']) and \ - (sampling_rate2 == sampling_rate) and (audio.shape[0] == audio2.shape[0]): - comb_weight = 0.3 + np.random.random()*0.4 - audio = comb_weight*audio + (1-comb_weight)*audio2 - inds = np.argsort(np.hstack((ann['start_times'], ann2['start_times']))) + if ( + ann["annotated"] + and (ann2["annotated"]) + and (sampling_rate2 == sampling_rate) + and (audio.shape[0] == audio2.shape[0]) + ): + comb_weight = 0.3 + np.random.random() * 0.4 + audio = comb_weight * audio + (1 - comb_weight) * audio2 + inds = np.argsort(np.hstack((ann["start_times"], ann2["start_times"]))) for kk in ann.keys(): # when combining calls from different files, assume they come from different individuals - if kk == 'individual_ids': - if (ann[kk]>-1).sum() > 0: - ann2[kk][ann2[kk]>-1] += np.max(ann[kk][ann[kk]>-1]) + 1 + if kk == "individual_ids": + if (ann[kk] > -1).sum() > 0: + ann2[kk][ann2[kk] > -1] += 
np.max(ann[kk][ann[kk] > -1]) + 1 - if (kk != 'class_id_file') and (kk != 'annotated'): + if (kk != "class_id_file") and (kk != "annotated"): ann[kk] = np.hstack((ann[kk], ann2[kk]))[inds] return audio, ann @@ -227,53 +348,70 @@ class AudioLoader(torch.utils.data.Dataset): # filter out unused annotation here filtered_annotations = [] - for ii, aa in enumerate(dd['annotation']): + for ii, aa in enumerate(dd["annotation"]): - if 'individual' in aa.keys(): - aa['individual'] = int(aa['individual']) + if "individual" in aa.keys(): + aa["individual"] = int(aa["individual"]) # if only one call labeled it has to be from the same individual - if len(dd['annotation']) == 1: - aa['individual'] = 0 + if len(dd["annotation"]) == 1: + aa["individual"] = 0 # convert class name into class label - if aa['class'] in self.params['class_names']: - aa['class_id'] = self.params['class_names'].index(aa['class']) + if aa["class"] in self.params["class_names"]: + aa["class_id"] = self.params["class_names"].index( + aa["class"] + ) else: - aa['class_id'] = -1 + aa["class_id"] = -1 - if aa['class'] not in self.params['classes_to_ignore']: + if aa["class"] not in self.params["classes_to_ignore"]: filtered_annotations.append(aa) - dd['annotation'] = filtered_annotations - dd['start_times'] = np.array([aa['start_time'] for aa in dd['annotation']]) - dd['end_times'] = np.array([aa['end_time'] for aa in dd['annotation']]) - dd['high_freqs'] = np.array([float(aa['high_freq']) for aa in dd['annotation']]) - dd['low_freqs'] = np.array([float(aa['low_freq']) for aa in dd['annotation']]) - dd['class_ids'] = np.array([aa['class_id'] for aa in dd['annotation']]).astype(np.int) - dd['individual_ids'] = np.array([aa['individual'] for aa in dd['annotation']]).astype(np.int) + dd["annotation"] = filtered_annotations + dd["start_times"] = np.array( + [aa["start_time"] for aa in dd["annotation"]] + ) + dd["end_times"] = np.array( + [aa["end_time"] for aa in dd["annotation"]] + ) + dd["high_freqs"] = np.array( + [float(aa["high_freq"]) for aa in dd["annotation"]] + ) + dd["low_freqs"] = np.array( + [float(aa["low_freq"]) for aa in dd["annotation"]] + ) + dd["class_ids"] = np.array( + [aa["class_id"] for aa in dd["annotation"]] + ).astype(np.int) + dd["individual_ids"] = np.array( + [aa["individual"] for aa in dd["annotation"]] + ).astype(np.int) # file level class name - dd['class_id_file'] = -1 - if 'class_name' in dd.keys(): - if dd['class_name'] in self.params['class_names']: - dd['class_id_file'] = self.params['class_names'].index(dd['class_name']) + dd["class_id_file"] = -1 + if "class_name" in dd.keys(): + if dd["class_name"] in self.params["class_names"]: + dd["class_id_file"] = self.params["class_names"].index( + dd["class_name"] + ) self.data_anns.append(dd) - ann_cnt = [len(aa['annotation']) for aa in self.data_anns] - self.max_num_anns = 2*np.max(ann_cnt) # x2 because we may be combining files during training + ann_cnt = [len(aa["annotation"]) for aa in self.data_anns] + self.max_num_anns = 2 * np.max( + ann_cnt + ) # x2 because we may be combining files during training - print('\n') + print("\n") if dataset_name is not None: - print('Dataset : ' + dataset_name) + print("Dataset : " + dataset_name) if self.is_train: - print('Split type : train') + print("Split type : train") else: - print('Split type : test') - print('Num files : ' + str(len(self.data_anns))) - print('Num calls : ' + str(np.sum(ann_cnt))) - + print("Split type : test") + print("Num files : " + str(len(self.data_anns))) + print("Num calls : " + 
str(np.sum(ann_cnt))) def get_file_and_anns(self, index=None): @@ -281,110 +419,169 @@ class AudioLoader(torch.utils.data.Dataset): if index == None: index = np.random.randint(0, len(self.data_anns)) - audio_file = self.data_anns[index]['file_path'] - sampling_rate, audio_raw = au.load_audio_file(audio_file, self.data_anns[index]['time_exp'], - self.params['target_samp_rate'], self.params['scale_raw_audio']) + audio_file = self.data_anns[index]["file_path"] + sampling_rate, audio_raw = au.load_audio( + audio_file, + self.data_anns[index]["time_exp"], + self.params["target_samp_rate"], + self.params["scale_raw_audio"], + ) # copy annotation ann = {} - ann['annotated'] = self.data_anns[index]['annotated'] - ann['class_id_file'] = self.data_anns[index]['class_id_file'] - keys = ['start_times', 'end_times', 'high_freqs', 'low_freqs', 'class_ids', 'individual_ids'] + ann["annotated"] = self.data_anns[index]["annotated"] + ann["class_id_file"] = self.data_anns[index]["class_id_file"] + keys = [ + "start_times", + "end_times", + "high_freqs", + "low_freqs", + "class_ids", + "individual_ids", + ] for kk in keys: ann[kk] = self.data_anns[index][kk].copy() # if train then grab a random crop if self.is_train: - nfft = int(self.params['fft_win_length']*sampling_rate) - noverlap = int(self.params['fft_overlap']*nfft) - length_samples = self.params['spec_train_width']*(nfft - noverlap) + noverlap + nfft = int(self.params["fft_win_length"] * sampling_rate) + noverlap = int(self.params["fft_overlap"] * nfft) + length_samples = ( + self.params["spec_train_width"] * (nfft - noverlap) + noverlap + ) if audio_raw.shape[0] - length_samples > 0: - sample_crop = np.random.randint(audio_raw.shape[0] - length_samples) + sample_crop = np.random.randint( + audio_raw.shape[0] - length_samples + ) else: sample_crop = 0 - audio_raw = audio_raw[sample_crop:sample_crop+length_samples] - ann['start_times'] = ann['start_times'] - sample_crop/float(sampling_rate) - ann['end_times'] = ann['end_times'] - sample_crop/float(sampling_rate) + audio_raw = audio_raw[sample_crop : sample_crop + length_samples] + ann["start_times"] = ann["start_times"] - sample_crop / float( + sampling_rate + ) + ann["end_times"] = ann["end_times"] - sample_crop / float( + sampling_rate + ) # pad audio if self.is_train: - op_spec_target_size = self.params['spec_train_width'] + op_spec_target_size = self.params["spec_train_width"] else: op_spec_target_size = None - audio_raw = au.pad_audio(audio_raw, sampling_rate, self.params['fft_win_length'], - self.params['fft_overlap'], self.params['resize_factor'], - self.params['spec_divide_factor'], op_spec_target_size) + audio_raw = au.pad_audio( + audio_raw, + sampling_rate, + self.params["fft_win_length"], + self.params["fft_overlap"], + self.params["resize_factor"], + self.params["spec_divide_factor"], + op_spec_target_size, + ) duration = audio_raw.shape[0] / float(sampling_rate) # sort based on time - inds = np.argsort(ann['start_times']) + inds = np.argsort(ann["start_times"]) for kk in ann.keys(): - if (kk != 'class_id_file') and (kk != 'annotated'): + if (kk != "class_id_file") and (kk != "annotated"): ann[kk] = ann[kk][inds] return audio_raw, sampling_rate, duration, ann - def __getitem__(self, index): # load audio file audio, sampling_rate, duration, ann = self.get_file_and_anns(index) # augment on raw audio - if self.is_train and self.params['augment_at_train']: + if self.is_train and self.params["augment_at_train"]: # augment - combine with random audio file - if 
self.params['augment_at_train_combine'] and np.random.random() < self.params['aug_prob']: - audio2, sampling_rate2, duration2, ann2 = self.get_file_and_anns() - audio, ann = combine_audio_aug(audio, sampling_rate, ann, audio2, sampling_rate2, ann2) + if ( + self.params["augment_at_train_combine"] + and np.random.random() < self.params["aug_prob"] + ): + ( + audio2, + sampling_rate2, + duration2, + ann2, + ) = self.get_file_and_anns() + audio, ann = combine_audio_aug( + audio, sampling_rate, ann, audio2, sampling_rate2, ann2 + ) # simulate echo by adding delayed copy of the file - if np.random.random() < self.params['aug_prob']: + if np.random.random() < self.params["aug_prob"]: audio = echo_aug(audio, sampling_rate, self.params) # resample the audio - #if np.random.random() < self.params['aug_prob']: + # if np.random.random() < self.params['aug_prob']: # audio, sampling_rate, duration = resample_aug(audio, sampling_rate, self.params) # create spectrogram - spec, spec_for_viz = au.generate_spectrogram(audio, sampling_rate, self.params, self.return_spec_for_viz) - rsf = self.params['resize_factor'] - spec_op_shape = (int(self.params['spec_height']*rsf), int(spec.shape[1]*rsf)) + spec, spec_for_viz = au.generate_spectrogram( + audio, sampling_rate, self.params, self.return_spec_for_viz + ) + rsf = self.params["resize_factor"] + spec_op_shape = ( + int(self.params["spec_height"] * rsf), + int(spec.shape[1] * rsf), + ) # resize the spec spec = torch.from_numpy(spec).unsqueeze(0).unsqueeze(0) - spec = F.interpolate(spec, size=spec_op_shape, mode='bilinear', align_corners=False).squeeze(0) + spec = F.interpolate( + spec, size=spec_op_shape, mode="bilinear", align_corners=False + ).squeeze(0) # augment spectrogram - if self.is_train and self.params['augment_at_train']: + if self.is_train and self.params["augment_at_train"]: - if np.random.random() < self.params['aug_prob']: + if np.random.random() < self.params["aug_prob"]: spec = scale_vol_aug(spec, self.params) - if np.random.random() < self.params['aug_prob']: - spec = warp_spec_aug(spec, ann, self.return_spec_for_viz, self.params) + if np.random.random() < self.params["aug_prob"]: + spec = warp_spec_aug( + spec, ann, self.return_spec_for_viz, self.params + ) - if np.random.random() < self.params['aug_prob']: + if np.random.random() < self.params["aug_prob"]: spec = mask_time_aug(spec, self.params) - if np.random.random() < self.params['aug_prob']: + if np.random.random() < self.params["aug_prob"]: spec = mask_freq_aug(spec, self.params) outputs = {} - outputs['spec'] = spec + outputs["spec"] = spec if self.return_spec_for_viz: - outputs['spec_for_viz'] = torch.from_numpy(spec_for_viz).unsqueeze(0) + outputs["spec_for_viz"] = torch.from_numpy(spec_for_viz).unsqueeze( + 0 + ) # create ground truth heatmaps - outputs['y_2d_det'], outputs['y_2d_size'], outputs['y_2d_classes'], ann_aug =\ - generate_gt_heatmaps(spec_op_shape, sampling_rate, ann, self.params) + ( + outputs["y_2d_det"], + outputs["y_2d_size"], + outputs["y_2d_classes"], + ann_aug, + ) = generate_gt_heatmaps(spec_op_shape, sampling_rate, ann, self.params) # hack to get around requirement that all vectors are the same length in # the output batch - pad_size = self.max_num_anns-len(ann_aug['individual_ids']) - outputs['is_valid'] = pad_aray(np.ones(len(ann_aug['individual_ids'])), pad_size) - keys = ['class_ids', 'individual_ids', 'x_inds', 'y_inds', - 'start_times', 'end_times', 'low_freqs', 'high_freqs'] + pad_size = self.max_num_anns - len(ann_aug["individual_ids"]) + 
outputs["is_valid"] = pad_aray( + np.ones(len(ann_aug["individual_ids"])), pad_size + ) + keys = [ + "class_ids", + "individual_ids", + "x_inds", + "y_inds", + "start_times", + "end_times", + "low_freqs", + "high_freqs", + ] for kk in keys: outputs[kk] = pad_aray(ann_aug[kk], pad_size) @@ -394,14 +591,13 @@ class AudioLoader(torch.utils.data.Dataset): outputs[kk] = torch.from_numpy(outputs[kk]) # scalars - outputs['class_id_file'] = ann['class_id_file'] - outputs['annotated'] = ann['annotated'] - outputs['duration'] = duration - outputs['sampling_rate'] = sampling_rate - outputs['file_id'] = index + outputs["class_id_file"] = ann["class_id_file"] + outputs["annotated"] = ann["annotated"] + outputs["duration"] = duration + outputs["sampling_rate"] = sampling_rate + outputs["file_id"] = index return outputs - def __len__(self): return len(self.data_anns) diff --git a/bat_detect/train/evaluate.py b/bat_detect/train/evaluate.py index b88719f..a926fbb 100755 --- a/bat_detect/train/evaluate.py +++ b/bat_detect/train/evaluate.py @@ -1,6 +1,10 @@ import numpy as np -from sklearn.metrics import roc_curve, auc -from sklearn.metrics import accuracy_score, balanced_accuracy_score +from sklearn.metrics import ( + accuracy_score, + auc, + balanced_accuracy_score, + roc_curve, +) def compute_error_auc(op_str, gt, pred, prob): @@ -13,8 +17,11 @@ def compute_error_auc(op_str, gt, pred, prob): fpr, tpr, thresholds = roc_curve(gt, pred) roc_auc = auc(fpr, tpr) - print(op_str + ", class acc = {:.3f}, ROC AUC = {:.3f}".format(class_acc, roc_auc)) - #return class_acc, roc_auc + print( + op_str + + ", class acc = {:.3f}, ROC AUC = {:.3f}".format(class_acc, roc_auc) + ) + # return class_acc, roc_auc def calc_average_precision(recall, precision): @@ -25,10 +32,10 @@ def calc_average_precision(recall, precision): # pascal 12 way mprec = np.hstack((0, precision, 0)) mrec = np.hstack((0, recall, 1)) - for ii in range(mprec.shape[0]-2, -1,-1): - mprec[ii] = np.maximum(mprec[ii], mprec[ii+1]) - inds = np.where(np.not_equal(mrec[1:], mrec[:-1]))[0]+1 - ave_prec = ((mrec[inds] - mrec[inds-1])*mprec[inds]).sum() + for ii in range(mprec.shape[0] - 2, -1, -1): + mprec[ii] = np.maximum(mprec[ii], mprec[ii + 1]) + inds = np.where(np.not_equal(mrec[1:], mrec[:-1]))[0] + 1 + ave_prec = ((mrec[inds] - mrec[inds - 1]) * mprec[inds]).sum() return float(ave_prec) @@ -37,7 +44,7 @@ def calc_recall_at_x(recall, precision, x=0.95): precision[np.isnan(precision)] = 0 recall[np.isnan(recall)] = 0 - inds = np.where(precision[::-1]>x)[0] + inds = np.where(precision[::-1] > x)[0] if len(inds) > 0: return float(recall[::-1][inds[0]]) else: @@ -51,7 +58,15 @@ def compute_affinity_1d(pred_box, gt_boxes, threshold): return valid_detection, np.argmin(score) -def compute_pre_rec(gts, preds, eval_mode, class_of_interest, num_classes, threshold, ignore_start_end): +def compute_pre_rec( + gts, + preds, + eval_mode, + class_of_interest, + num_classes, + threshold, + ignore_start_end, +): """ Computes precision and recall. Assumes that each file has been exhaustively annotated. 
Will not count predicted detection with a start time that is within @@ -78,26 +93,40 @@ def compute_pre_rec(gts, preds, eval_mode, class_of_interest, num_classes, thres for pid, pp in enumerate(preds): # filter predicted calls that are too near the start or end of the file - file_dur = gts[pid]['duration'] - valid_inds = (pp['start_times'] >= ignore_start_end) & (pp['start_times'] <= (file_dur - ignore_start_end)) + file_dur = gts[pid]["duration"] + valid_inds = (pp["start_times"] >= ignore_start_end) & ( + pp["start_times"] <= (file_dur - ignore_start_end) + ) - pred_boxes.append(np.vstack((pp['start_times'][valid_inds], pp['end_times'][valid_inds], - pp['low_freqs'][valid_inds], pp['high_freqs'][valid_inds])).T) + pred_boxes.append( + np.vstack( + ( + pp["start_times"][valid_inds], + pp["end_times"][valid_inds], + pp["low_freqs"][valid_inds], + pp["high_freqs"][valid_inds], + ) + ).T + ) - if eval_mode == 'detection': + if eval_mode == "detection": # overall detection - confidence.append(pp['det_probs'][valid_inds]) - elif eval_mode == 'per_class': + confidence.append(pp["det_probs"][valid_inds]) + elif eval_mode == "per_class": # per class - confidence.append(pp['class_probs'].T[valid_inds, class_of_interest]) - elif eval_mode == 'top_class': + confidence.append( + pp["class_probs"].T[valid_inds, class_of_interest] + ) + elif eval_mode == "top_class": # per class - note that sometimes 'class_probs' can be num_classes+1 in size - top_class = np.argmax(pp['class_probs'].T[valid_inds, :num_classes], 1) - confidence.append(pp['class_probs'].T[valid_inds, top_class]) + top_class = np.argmax( + pp["class_probs"].T[valid_inds, :num_classes], 1 + ) + confidence.append(pp["class_probs"].T[valid_inds, top_class]) pred_class.append(top_class) # be careful, assuming the order in the list is same as GT - file_ids.append([pid]*valid_inds.sum()) + file_ids.append([pid] * valid_inds.sum()) confidence = np.hstack(confidence) file_ids = np.hstack(file_ids).astype(np.int) @@ -105,7 +134,6 @@ def compute_pre_rec(gts, preds, eval_mode, class_of_interest, num_classes, thres if len(pred_class) > 0: pred_class = np.hstack(pred_class) - # extract relevant ground truth boxes gt_boxes = [] gt_assigned = [] @@ -115,32 +143,42 @@ def compute_pre_rec(gts, preds, eval_mode, class_of_interest, num_classes, thres for gg in gts: # filter ground truth calls that are too near the start or end of the file - file_dur = gg['duration'] - valid_inds = (gg['start_times'] >= ignore_start_end) & (gg['start_times'] <= (file_dur - ignore_start_end)) + file_dur = gg["duration"] + valid_inds = (gg["start_times"] >= ignore_start_end) & ( + gg["start_times"] <= (file_dur - ignore_start_end) + ) # note, files with the incorrect duration will cause a problem - if (gg['start_times'] > file_dur).sum() > 0: - print('Error: file duration incorrect for', gg['id']) - assert(False) + if (gg["start_times"] > file_dur).sum() > 0: + print("Error: file duration incorrect for", gg["id"]) + assert False - boxes = np.vstack((gg['start_times'][valid_inds], gg['end_times'][valid_inds], - gg['low_freqs'][valid_inds], gg['high_freqs'][valid_inds])).T - gen_class = gg['class_ids'][valid_inds] == -1 - class_ids = gg['class_ids'][valid_inds] + boxes = np.vstack( + ( + gg["start_times"][valid_inds], + gg["end_times"][valid_inds], + gg["low_freqs"][valid_inds], + gg["high_freqs"][valid_inds], + ) + ).T + gen_class = gg["class_ids"][valid_inds] == -1 + class_ids = gg["class_ids"][valid_inds] # keep track of the number of relevant ground truth calls - if 
eval_mode == 'detection': + if eval_mode == "detection": # all valid ones - num_positives += len(gg['start_times'][valid_inds]) - elif eval_mode == 'per_class': + num_positives += len(gg["start_times"][valid_inds]) + elif eval_mode == "per_class": # all valid ones with class of interest - num_positives += (gg['class_ids'][valid_inds] == class_of_interest).sum() - elif eval_mode == 'top_class': + num_positives += ( + gg["class_ids"][valid_inds] == class_of_interest + ).sum() + elif eval_mode == "top_class": # all valid ones with non generic class - num_positives += (gg['class_ids'][valid_inds] > -1).sum() + num_positives += (gg["class_ids"][valid_inds] > -1).sum() # find relevant classes (i.e. class_of_interest) and events without known class (i.e. generic class, -1) - if eval_mode == 'per_class': + if eval_mode == "per_class": class_inds = (class_ids == class_of_interest) | (class_ids == -1) boxes = boxes[class_inds, :] gen_class = gen_class[class_inds] @@ -151,25 +189,27 @@ def compute_pre_rec(gts, preds, eval_mode, class_of_interest, num_classes, thres gt_generic_class.append(gen_class) gt_class.append(class_ids) - # loop through detections and keep track of those that have been assigned - true_pos = np.zeros(confidence.shape[0]) - valid_inds = np.ones(confidence.shape[0]) == 1 # intialize to True - sorted_inds = np.argsort(confidence)[::-1] # sort high to low + true_pos = np.zeros(confidence.shape[0]) + valid_inds = np.ones(confidence.shape[0]) == 1 # initialize to True + sorted_inds = np.argsort(confidence)[::-1] # sort high to low for ii, ind in enumerate(sorted_inds): gt_id = file_ids[ind] valid_det = False if gt_boxes[gt_id].shape[0] > 0: # compute overlap - valid_det, det_ind = compute_affinity_1d(pred_boxes[ind], gt_boxes[gt_id], - threshold) + valid_det, det_ind = compute_affinity_1d( + pred_boxes[ind], gt_boxes[gt_id], threshold + ) # valid detection that has not already been assigned if valid_det and (gt_assigned[gt_id][det_ind] == 0): count_as_true_pos = True - if eval_mode == 'top_class' and (gt_class[gt_id][det_ind] != pred_class[ind]): + if eval_mode == "top_class" and ( + gt_class[gt_id][det_ind] != pred_class[ind] + ): # needs to be the same class count_as_true_pos = False @@ -181,40 +221,43 @@ # if event is generic class (i.e. 
gt_generic_class[gt_id][det_ind] is True) # and eval_mode != 'detection', then ignore it if gt_generic_class[gt_id][det_ind]: - if eval_mode == 'per_class' or eval_mode == 'top_class': + if eval_mode == "per_class" or eval_mode == "top_class": valid_inds[ii] = False - # store threshold values - used for plotting conf_sorted = np.sort(confidence)[::-1][valid_inds] thresholds = np.linspace(0.1, 0.9, 9) thresholds_inds = np.zeros(len(thresholds), dtype=np.int) for ii, tt in enumerate(thresholds): thresholds_inds[ii] = np.argmin(conf_sorted > tt) - thresholds_inds[thresholds_inds==0] = -1 + thresholds_inds[thresholds_inds == 0] = -1 # compute precision and recall - true_pos = true_pos[valid_inds] - false_pos_c = np.cumsum(1-true_pos) - true_pos_c = np.cumsum(true_pos) + true_pos = true_pos[valid_inds] + false_pos_c = np.cumsum(1 - true_pos) + true_pos_c = np.cumsum(true_pos) recall = true_pos_c / num_positives - precision = true_pos_c / np.maximum(true_pos_c + false_pos_c, np.finfo(np.float64).eps) + precision = true_pos_c / np.maximum( + true_pos_c + false_pos_c, np.finfo(np.float64).eps + ) results = {} - results['recall'] = recall - results['precision'] = precision - results['num_gt'] = num_positives + results["recall"] = recall + results["precision"] = precision + results["num_gt"] = num_positives - results['thresholds'] = thresholds - results['thresholds_inds'] = thresholds_inds + results["thresholds"] = thresholds + results["thresholds_inds"] = thresholds_inds if num_positives == 0: - results['avg_prec'] = np.nan - results['rec_at_x'] = np.nan + results["avg_prec"] = np.nan + results["rec_at_x"] = np.nan else: - results['avg_prec'] = np.round(calc_average_precision(recall, precision), 5) - results['rec_at_x'] = np.round(calc_recall_at_x(recall, precision), 5) + results["avg_prec"] = np.round( + calc_average_precision(recall, precision), 5 + ) + results["rec_at_x"] = np.round(calc_recall_at_x(recall, precision), 5) return results @@ -230,19 +273,19 @@ def compute_file_accuracy_simple(gts, preds, num_classes): gt_valid = [] pred_valid = [] for ii in range(len(gts)): - gt_class = np.unique(gts[ii]['class_ids']) + gt_class = np.unique(gts[ii]["class_ids"]) if len(gt_class) == 1 and gt_class[0] != -1: gt_valid.append(gt_class[0]) - pred = preds[ii]['class_probs'][:num_classes, :].T + pred = preds[ii]["class_probs"][:num_classes, :].T pred_valid.append(np.argmax(pred.mean(0))) acc = (np.array(gt_valid) == np.array(pred_valid)).mean() res = {} - res['num_valid_files'] = len(gt_valid) - res['num_total_files'] = len(gts) - res['gt_valid_file'] = gt_valid - res['pred_valid_file'] = pred_valid - res['file_acc'] = np.round(acc, 5) + res["num_valid_files"] = len(gt_valid) + res["num_total_files"] = len(gts) + res["gt_valid_file"] = gt_valid + res["pred_valid_file"] = pred_valid + res["file_acc"] = np.round(acc, 5) return res @@ -256,12 +299,20 @@ def compute_file_accuracy(gts, preds, num_classes): # compute min and max scoring range - then threshold min_val = 0 - mins = [pp['class_probs'].min() for pp in preds if pp['class_probs'].shape[1] > 0] + mins = [ + pp["class_probs"].min() + for pp in preds + if pp["class_probs"].shape[1] > 0 + ] if len(mins) > 0: min_val = np.min(mins) max_val = 1.0 - maxes = [pp['class_probs'].max() for pp in preds if pp['class_probs'].shape[1] > 0] + maxes = [ + pp["class_probs"].max() + for pp in preds + if pp["class_probs"].shape[1] > 0 + ] if len(maxes) > 0: max_val = np.max(maxes) @@ -272,33 +323,37 @@ def compute_file_accuracy(gts, preds, num_classes): gt_valid = [] 
pred_valid_all = [] for ii in range(len(gts)): - gt_class = np.unique(gts[ii]['class_ids']) + gt_class = np.unique(gts[ii]["class_ids"]) if len(gt_class) == 1 and gt_class[0] != -1: gt_valid.append(gt_class[0]) - pred = preds[ii]['class_probs'][:num_classes, :].T + pred = preds[ii]["class_probs"][:num_classes, :].T p_class = np.zeros(len(thresh)) for tt in range(len(thresh)): - p_class[tt] = (pred*(pred>=thresh[tt])).sum(0).argmax() + p_class[tt] = (pred * (pred >= thresh[tt])).sum(0).argmax() pred_valid_all.append(p_class) # pick the result corresponding to the overall best threshold pred_valid_all = np.vstack(pred_valid_all) - acc_per_thresh = (np.array(gt_valid)[..., np.newaxis] == pred_valid_all).mean(0) + acc_per_thresh = ( + np.array(gt_valid)[..., np.newaxis] == pred_valid_all + ).mean(0) best_thresh = np.argmax(acc_per_thresh) best_acc = acc_per_thresh[best_thresh] pred_valid = pred_valid_all[:, best_thresh].astype(np.int).tolist() res = {} - res['num_valid_files'] = len(gt_valid) - res['num_total_files'] = len(gts) - res['gt_valid_file'] = gt_valid - res['pred_valid_file'] = pred_valid - res['file_acc'] = np.round(best_acc, 5) + res["num_valid_files"] = len(gt_valid) + res["num_total_files"] = len(gts) + res["gt_valid_file"] = gt_valid + res["pred_valid_file"] = pred_valid + res["file_acc"] = np.round(best_acc, 5) return res -def evaluate_predictions(gts, preds, class_names, detection_overlap, ignore_start_end=0.0): +def evaluate_predictions( + gts, preds, class_names, detection_overlap, ignore_start_end=0.0 +): """ Computes metrics derived from the precision and recall. Assumes that gts and preds are both lists of the same length, with ground @@ -307,24 +362,50 @@ Returns the overall detection results, and per class results """ - assert(len(gts) == len(preds)) + assert len(gts) == len(preds) num_classes = len(class_names) # evaluate detection on its own i.e. 
ignoring class - det_results = compute_pre_rec(gts, preds, 'detection', None, num_classes, detection_overlap, ignore_start_end) - top_class = compute_pre_rec(gts, preds, 'top_class', None, num_classes, detection_overlap, ignore_start_end) - det_results['top_class'] = top_class + det_results = compute_pre_rec( + gts, + preds, + "detection", + None, + num_classes, + detection_overlap, + ignore_start_end, + ) + top_class = compute_pre_rec( + gts, + preds, + "top_class", + None, + num_classes, + detection_overlap, + ignore_start_end, + ) + det_results["top_class"] = top_class # per class evaluation - det_results['class_pr'] = [] + det_results["class_pr"] = [] for cc in range(num_classes): - res = compute_pre_rec(gts, preds, 'per_class', cc, num_classes, detection_overlap, ignore_start_end) - res['name'] = class_names[cc] - det_results['class_pr'].append(res) + res = compute_pre_rec( + gts, + preds, + "per_class", + cc, + num_classes, + detection_overlap, + ignore_start_end, + ) + res["name"] = class_names[cc] + det_results["class_pr"].append(res) # ignores classes that are not present in the test set - det_results['avg_prec_class'] = np.mean([rs['avg_prec'] for rs in det_results['class_pr'] if rs['num_gt'] > 0]) - det_results['avg_prec_class'] = np.round(det_results['avg_prec_class'], 5) + det_results["avg_prec_class"] = np.mean( + [rs["avg_prec"] for rs in det_results["class_pr"] if rs["num_gt"] > 0] + ) + det_results["avg_prec_class"] = np.round(det_results["avg_prec_class"], 5) # file level evaluation res_file = compute_file_accuracy(gts, preds, num_classes) diff --git a/bat_detect/train/losses.py b/bat_detect/train/losses.py index aaef2c4..02bfdd6 100644 --- a/bat_detect/train/losses.py +++ b/bat_detect/train/losses.py @@ -7,7 +7,9 @@ def bbox_size_loss(pred_size, gt_size): Bounding box size loss. Only compute loss where there is a bounding box. """ gt_size_mask = (gt_size > 0).float() - return (F.l1_loss(pred_size*gt_size_mask, gt_size, reduction='sum') / (gt_size_mask.sum() + 1e-5)) + return F.l1_loss(pred_size * gt_size_mask, gt_size, reduction="sum") / ( + gt_size_mask.sum() + 1e-5 + ) def focal_loss(pred, gt, weights=None, valid_mask=None): @@ -24,20 +26,25 @@ def focal_loss(pred, gt, weights=None, valid_mask=None): neg_inds = gt.lt(1).float() pos_loss = torch.log(pred + eps) * torch.pow(1 - pred, alpha) * pos_inds - neg_loss = torch.log(1 - pred + eps) * torch.pow(pred, alpha) * torch.pow(1 - gt, beta) * neg_inds + neg_loss = ( + torch.log(1 - pred + eps) + * torch.pow(pred, alpha) + * torch.pow(1 - gt, beta) + * neg_inds + ) if weights is not None: - pos_loss = pos_loss*weights - #neg_loss = neg_loss*weights + pos_loss = pos_loss * weights + # neg_loss = neg_loss*weights if valid_mask is not None: - pos_loss = pos_loss*valid_mask - neg_loss = neg_loss*valid_mask + pos_loss = pos_loss * valid_mask + neg_loss = neg_loss * valid_mask pos_loss = pos_loss.sum() neg_loss = neg_loss.sum() - num_pos = pos_inds.float().sum() + num_pos = pos_inds.float().sum() if num_pos == 0: loss = -neg_loss else: @@ -47,10 +54,10 @@ def focal_loss(pred, gt, weights=None, valid_mask=None): def mse_loss(pred, gt, weights=None, valid_mask=None): """ - Mean squared error loss. + Mean squared error loss. 
""" if valid_mask is None: - op = ((gt-pred)**2).mean() + op = ((gt - pred) ** 2).mean() else: - op = (valid_mask*((gt-pred)**2)).sum() / valid_mask.sum() + op = (valid_mask * ((gt - pred) ** 2)).sum() / valid_mask.sum() return op diff --git a/bat_detect/train/train_model.py b/bat_detect/train/train_model.py index d955216..1f4ea5f 100644 --- a/bat_detect/train/train_model.py +++ b/bat_detect/train/train_model.py @@ -1,32 +1,27 @@ -import numpy as np -import matplotlib.pyplot as plt -import os -import torch -import torch.nn.functional as F -from torch.optim.lr_scheduler import CosineAnnealingLR -import json import argparse +import json +import warnings -import sys -sys.path.append(os.path.join('..', '..')) +import matplotlib.pyplot as plt +import numpy as np +import torch +from torch.optim.lr_scheduler import CosineAnnealingLR -import bat_detect.detector.parameters as parameters -import bat_detect.detector.models as models +from bat_detect.detector import models +from bat_detect.detector import parameters +from bat_detect.train import losses import bat_detect.detector.post_process as pp -import bat_detect.utils.plot_utils as pu - import bat_detect.train.audio_dataloader as adl import bat_detect.train.evaluate as evl -import bat_detect.train.train_utils as tu import bat_detect.train.train_split as ts -import bat_detect.train.losses as losses +import bat_detect.train.train_utils as tu +import bat_detect.utils.plot_utils as pu -import warnings warnings.filterwarnings("ignore", category=UserWarning) def save_images_batch(model, data_loader, params): - print('\nsaving images ...') + print("\nsaving images ...") is_train_state = data_loader.dataset.is_train data_loader.dataset.is_train = False @@ -36,67 +31,112 @@ def save_images_batch(model, data_loader, params): ind = 0 # first image in each batch with torch.no_grad(): for batch_idx, inputs in enumerate(data_loader): - data = inputs['spec'].to(params['device']) + data = inputs["spec"].to(params["device"]) outputs = model(data) - spec_viz = inputs['spec_for_viz'].data.cpu().numpy() - orig_index = inputs['file_id'][ind] - plot_title = data_loader.dataset.data_anns[orig_index]['id'] - op_file_name = params['op_im_dir_test'] + data_loader.dataset.data_anns[orig_index]['id'] + '.jpg' - save_image(spec_viz, outputs, ind, inputs, params, op_file_name, plot_title) + spec_viz = inputs["spec_for_viz"].data.cpu().numpy() + orig_index = inputs["file_id"][ind] + plot_title = data_loader.dataset.data_anns[orig_index]["id"] + op_file_name = ( + params["op_im_dir_test"] + + data_loader.dataset.data_anns[orig_index]["id"] + + ".jpg" + ) + save_image( + spec_viz, + outputs, + ind, + inputs, + params, + op_file_name, + plot_title, + ) data_loader.dataset.is_train = is_train_state data_loader.dataset.return_spec_for_viz = False -def save_image(spec_viz, outputs, ind, inputs, params, op_file_name, plot_title): - pred_nms, _ = pp.run_nms(outputs, params, inputs['sampling_rate'].float()) - pred_hm = outputs['pred_det'][ind, 0, :].data.cpu().numpy() +def save_image( + spec_viz, outputs, ind, inputs, params, op_file_name, plot_title +): + pred_nms, _ = pp.run_nms(outputs, params, inputs["sampling_rate"].float()) + pred_hm = outputs["pred_det"][ind, 0, :].data.cpu().numpy() spec_viz = spec_viz[ind, 0, :] - gt = parse_gt_data(inputs)[ind] - sampling_rate = inputs['sampling_rate'][ind].item() - duration = inputs['duration'][ind].item() + gt = parse_gt_data(inputs)[ind] + sampling_rate = inputs["sampling_rate"][ind].item() + duration = inputs["duration"][ind].item() - 
pu.plot_spec(spec_viz, sampling_rate, duration, gt, pred_nms[ind], - params, plot_title, op_file_name, pred_hm, plot_boxes=True, fixed_aspect=False) + pu.plot_spec( + spec_viz, + sampling_rate, + duration, + gt, + pred_nms[ind], + params, + plot_title, + op_file_name, + pred_hm, + plot_boxes=True, + fixed_aspect=False, + ) -def loss_fun(outputs, gt_det, gt_size, gt_class, det_criterion, params, class_inv_freq): +def loss_fun( + outputs, gt_det, gt_size, gt_class, det_criterion, params, class_inv_freq +): # detection loss - loss = params['det_loss_weight']*det_criterion(outputs['pred_det'], gt_det) + loss = params["det_loss_weight"] * det_criterion( + outputs["pred_det"], gt_det + ) # bounding box size loss - loss += params['size_loss_weight']*losses.bbox_size_loss(outputs['pred_size'], gt_size) + loss += params["size_loss_weight"] * losses.bbox_size_loss( + outputs["pred_size"], gt_size + ) # classification loss valid_mask = (gt_class[:, :-1, :, :].sum(1) > 0).float().unsqueeze(1) - p_class = outputs['pred_class'][:, :-1, :] - loss += params['class_loss_weight']*det_criterion(p_class, gt_class[:, :-1, :], valid_mask=valid_mask) + p_class = outputs["pred_class"][:, :-1, :] + loss += params["class_loss_weight"] * det_criterion( + p_class, gt_class[:, :-1, :], valid_mask=valid_mask + ) return loss -def train(model, epoch, data_loader, det_criterion, optimizer, scheduler, params): +def train( + model, epoch, data_loader, det_criterion, optimizer, scheduler, params +): model.train() train_loss = tu.AverageMeter() - class_inv_freq = torch.from_numpy(np.array(params['class_inv_freq'], dtype=np.float32)).to(params['device']) + class_inv_freq = torch.from_numpy( + np.array(params["class_inv_freq"], dtype=np.float32) + ).to(params["device"]) class_inv_freq = class_inv_freq.unsqueeze(0).unsqueeze(2).unsqueeze(2) - print('\nEpoch', epoch) + print("\nEpoch", epoch) for batch_idx, inputs in enumerate(data_loader): - data = inputs['spec'].to(params['device']) - gt_det = inputs['y_2d_det'].to(params['device']) - gt_size = inputs['y_2d_size'].to(params['device']) - gt_class = inputs['y_2d_classes'].to(params['device']) + data = inputs["spec"].to(params["device"]) + gt_det = inputs["y_2d_det"].to(params["device"]) + gt_size = inputs["y_2d_size"].to(params["device"]) + gt_class = inputs["y_2d_classes"].to(params["device"]) optimizer.zero_grad() outputs = model(data) - loss = loss_fun(outputs, gt_det, gt_size, gt_class, det_criterion, params, class_inv_freq) + loss = loss_fun( + outputs, + gt_det, + gt_size, + gt_class, + det_criterion, + params, + class_inv_freq, + ) train_loss.update(loss.item(), data.shape[0]) loss.backward() @@ -104,13 +144,18 @@ def train(model, epoch, data_loader, det_criterion, optimizer, scheduler, params scheduler.step() if batch_idx % 50 == 0 and batch_idx != 0: - print('[{}/{}]\tLoss: {:.4f}'.format( - batch_idx * len(data), len(data_loader.dataset), train_loss.avg)) + print( + "[{}/{}]\tLoss: {:.4f}".format( + batch_idx * len(data), + len(data_loader.dataset), + train_loss.avg, + ) + ) - print('Train loss : {:.4f}'.format(train_loss.avg)) + print("Train loss : {:.4f}".format(train_loss.avg)) res = {} - res['train_loss'] = float(train_loss.avg) + res["train_loss"] = float(train_loss.avg) return res @@ -120,16 +165,18 @@ def test(model, epoch, data_loader, det_criterion, params): ground_truths = [] test_loss = tu.AverageMeter() - class_inv_freq = torch.from_numpy(np.array(params['class_inv_freq'], dtype=np.float32)).to(params['device']) + class_inv_freq = torch.from_numpy( + 
np.array(params["class_inv_freq"], dtype=np.float32) + ).to(params["device"]) class_inv_freq = class_inv_freq.unsqueeze(0).unsqueeze(2).unsqueeze(2) with torch.no_grad(): for batch_idx, inputs in enumerate(data_loader): - data = inputs['spec'].to(params['device']) - gt_det = inputs['y_2d_det'].to(params['device']) - gt_size = inputs['y_2d_size'].to(params['device']) - gt_class = inputs['y_2d_classes'].to(params['device']) + data = inputs["spec"].to(params["device"]) + gt_det = inputs["y_2d_det"].to(params["device"]) + gt_size = inputs["y_2d_size"].to(params["device"]) + gt_class = inputs["y_2d_classes"].to(params["device"]) outputs = model(data) @@ -139,41 +186,79 @@ def test(model, epoch, data_loader, det_criterion, params): # for kk in ['pred_det', 'pred_size', 'pred_class']: # outputs[kk] = torch.cat([oo for oo in outputs[kk]], 2).unsqueeze(0) - if params['save_test_image_during_train'] and batch_idx == 0: + if params["save_test_image_during_train"] and batch_idx == 0: # for visualization - save the first prediction ind = 0 - orig_index = inputs['file_id'][ind] - plot_title = data_loader.dataset.data_anns[orig_index]['id'] - op_file_name = params['op_im_dir'] + str(orig_index.item()).zfill(4) + '_' + str(epoch).zfill(4) + '_pred.jpg' - save_image(data, outputs, ind, inputs, params, op_file_name, plot_title) + orig_index = inputs["file_id"][ind] + plot_title = data_loader.dataset.data_anns[orig_index]["id"] + op_file_name = ( + params["op_im_dir"] + + str(orig_index.item()).zfill(4) + + "_" + + str(epoch).zfill(4) + + "_pred.jpg" + ) + save_image( + data, + outputs, + ind, + inputs, + params, + op_file_name, + plot_title, + ) - loss = loss_fun(outputs, gt_det, gt_size, gt_class, det_criterion, params, class_inv_freq) + loss = loss_fun( + outputs, + gt_det, + gt_size, + gt_class, + det_criterion, + params, + class_inv_freq, + ) test_loss.update(loss.item(), data.shape[0]) # do NMS - pred_nms, _ = pp.run_nms(outputs, params, inputs['sampling_rate'].float()) + pred_nms, _ = pp.run_nms( + outputs, params, inputs["sampling_rate"].float() + ) predictions.extend(pred_nms) ground_truths.extend(parse_gt_data(inputs)) - res_det = evl.evaluate_predictions(ground_truths, predictions, params['class_names'], - params['detection_overlap'], params['ignore_start_end']) + res_det = evl.evaluate_predictions( + ground_truths, + predictions, + params["class_names"], + params["detection_overlap"], + params["ignore_start_end"], + ) - print('\nTest loss : {:.4f}'.format(test_loss.avg)) - print('Rec at 0.95 (det) : {:.4f}'.format(res_det['rec_at_x'])) - print('Avg prec (cls) : {:.4f}'.format(res_det['avg_prec'])) - print('File acc (cls) : {:.2f} - for {} out of {}'.format(res_det['file_acc'], - res_det['num_valid_files'], res_det['num_total_files'])) - print('Cls Avg prec (cls) : {:.4f}'.format(res_det['avg_prec_class'])) + print("\nTest loss : {:.4f}".format(test_loss.avg)) + print("Rec at 0.95 (det) : {:.4f}".format(res_det["rec_at_x"])) + print("Avg prec (cls) : {:.4f}".format(res_det["avg_prec"])) + print( + "File acc (cls) : {:.2f} - for {} out of {}".format( + res_det["file_acc"], + res_det["num_valid_files"], + res_det["num_total_files"], + ) + ) + print("Cls Avg prec (cls) : {:.4f}".format(res_det["avg_prec_class"])) - print('\nPer class average precision') - str_len = np.max([len(rs['name']) for rs in res_det['class_pr']]) + 5 - for cc, rs in enumerate(res_det['class_pr']): - if rs['num_gt'] > 0: - print(str(cc).ljust(5) + rs['name'].ljust(str_len) + '{:.4f}'.format(rs['avg_prec'])) + print("\nPer class 
average precision") + str_len = np.max([len(rs["name"]) for rs in res_det["class_pr"]]) + 5 + for cc, rs in enumerate(res_det["class_pr"]): + if rs["num_gt"] > 0: + print( + str(cc).ljust(5) + + rs["name"].ljust(str_len) + + "{:.4f}".format(rs["avg_prec"]) + ) res = {} - res['test_loss'] = float(test_loss.avg) + res["test_loss"] = float(test_loss.avg) return res_det, res @@ -181,176 +266,287 @@ def test(model, epoch, data_loader, det_criterion, params): def parse_gt_data(inputs): # reads the torch arrays into a dictionary of numpy arrays, taking care to # remove padding data i.e. not valid ones - keys = ['start_times', 'end_times', 'low_freqs', 'high_freqs', 'class_ids', 'individual_ids'] + keys = [ + "start_times", + "end_times", + "low_freqs", + "high_freqs", + "class_ids", + "individual_ids", + ] batch_data = [] - for ind in range(inputs['start_times'].shape[0]): - is_valid = inputs['is_valid'][ind]==1 + for ind in range(inputs["start_times"].shape[0]): + is_valid = inputs["is_valid"][ind] == 1 gt = {} for kk in keys: gt[kk] = inputs[kk][ind][is_valid].numpy().astype(np.float32) - gt['duration'] = inputs['duration'][ind].item() - gt['file_id'] = inputs['file_id'][ind].item() - gt['class_id_file'] = inputs['class_id_file'][ind].item() + gt["duration"] = inputs["duration"][ind].item() + gt["file_id"] = inputs["file_id"][ind].item() + gt["class_id_file"] = inputs["class_id_file"][ind].item() batch_data.append(gt) return batch_data def select_model(params): - num_classes = len(params['class_names']) - if params['model_name'] == 'Net2DFast': - model = models.Net2DFast(params['num_filters'], num_classes=num_classes, - emb_dim=params['emb_dim'], ip_height=params['ip_height'], - resize_factor=params['resize_factor']) - elif params['model_name'] == 'Net2DFastNoAttn': - model = models.Net2DFastNoAttn(params['num_filters'], num_classes=num_classes, - emb_dim=params['emb_dim'], ip_height=params['ip_height'], - resize_factor=params['resize_factor']) - elif params['model_name'] == 'Net2DFastNoCoordConv': - model = models.Net2DFastNoCoordConv(params['num_filters'], num_classes=num_classes, - emb_dim=params['emb_dim'], ip_height=params['ip_height'], - resize_factor=params['resize_factor']) + num_classes = len(params["class_names"]) + if params["model_name"] == "Net2DFast": + model = models.Net2DFast( + params["num_filters"], + num_classes=num_classes, + emb_dim=params["emb_dim"], + ip_height=params["ip_height"], + resize_factor=params["resize_factor"], + ) + elif params["model_name"] == "Net2DFastNoAttn": + model = models.Net2DFastNoAttn( + params["num_filters"], + num_classes=num_classes, + emb_dim=params["emb_dim"], + ip_height=params["ip_height"], + resize_factor=params["resize_factor"], + ) + elif params["model_name"] == "Net2DFastNoCoordConv": + model = models.Net2DFastNoCoordConv( + params["num_filters"], + num_classes=num_classes, + emb_dim=params["emb_dim"], + ip_height=params["ip_height"], + resize_factor=params["resize_factor"], + ) else: - print('No valid network specified') + print("No valid network specified") return model if __name__ == "__main__": - plt.close('all') + plt.close("all") params = parameters.get_params(True) if torch.cuda.is_available(): - params['device'] = 'cuda' + params["device"] = "cuda" else: - params['device'] = 'cpu' + params["device"] = "cpu" # setup arg parser and populate it with exiting parameters - will not work with lists parser = argparse.ArgumentParser() - parser.add_argument('data_dir', type=str, - help='Path to root of datasets') - 
parser.add_argument('ann_dir', type=str, - help='Path to extracted annotations') - parser.add_argument('--train_split', type=str, default='diff', # diff, same - help='Which train split to use') - parser.add_argument('--notes', type=str, default='', - help='Notes to save in text file') - parser.add_argument('--do_not_save_images', action='store_false', - help='Do not save images at the end of training') - parser.add_argument('--standardize_classs_names_ip', type=str, - default='Rhinolophus ferrumequinum;Rhinolophus hipposideros', - help='Will set low and high frequency the same for these classes. Separate names with ";"') + parser.add_argument("data_dir", type=str, help="Path to root of datasets") + parser.add_argument( + "ann_dir", type=str, help="Path to extracted annotations" + ) + parser.add_argument( + "--train_split", + type=str, + default="diff", # diff, same + help="Which train split to use", + ) + parser.add_argument( + "--notes", type=str, default="", help="Notes to save in text file" + ) + parser.add_argument( + "--do_not_save_images", + action="store_false", + help="Do not save images at the end of training", + ) + parser.add_argument( + "--standardize_classs_names_ip", + type=str, + default="Rhinolophus ferrumequinum;Rhinolophus hipposideros", + help='Will set low and high frequency the same for these classes. Separate names with ";"', + ) for key, val in params.items(): - parser.add_argument('--'+key, type=type(val), default=val) + parser.add_argument("--" + key, type=type(val), default=val) params = vars(parser.parse_args()) # save notes file - if params['notes'] != '': - tu.write_notes_file(params['experiment'] + 'notes.txt', params['notes']) + if params["notes"] != "": + tu.write_notes_file(params["experiment"] + "notes.txt", params["notes"]) # load the training and test meta data - there are different splits defined - train_sets, test_sets = ts.get_train_test_data(params['ann_dir'], params['data_dir'], params['train_split']) - train_sets_no_path, test_sets_no_path = ts.get_train_test_data('', '', params['train_split']) + train_sets, test_sets = ts.get_train_test_data( + params["ann_dir"], params["data_dir"], params["train_split"] + ) + train_sets_no_path, test_sets_no_path = ts.get_train_test_data( + "", "", params["train_split"] + ) # keep track of what we have trained on - params['train_sets'] = train_sets_no_path - params['test_sets'] = test_sets_no_path + params["train_sets"] = train_sets_no_path + params["test_sets"] = test_sets_no_path # load train annotations - merge them all together - print('\nTraining on:') + print("\nTraining on:") for tt in train_sets: - print(tt['ann_path']) - classes_to_ignore = params['classes_to_ignore']+params['generic_class'] - data_train, params['class_names'], params['class_inv_freq'] = \ - tu.load_set_of_anns(train_sets, classes_to_ignore, params['events_of_interest'], params['convert_to_genus']) - params['genus_names'], params['genus_mapping'] = tu.get_genus_mapping(params['class_names']) - params['class_names_short'] = tu.get_short_class_names(params['class_names']) + print(tt["ann_path"]) + classes_to_ignore = params["classes_to_ignore"] + params["generic_class"] + ( + data_train, + params["class_names"], + params["class_inv_freq"], + ) = tu.load_set_of_anns( + train_sets, + classes_to_ignore, + params["events_of_interest"], + params["convert_to_genus"], + ) + params["genus_names"], params["genus_mapping"] = tu.get_genus_mapping( + params["class_names"] + ) + params["class_names_short"] = tu.get_short_class_names( + 
params["class_names"] + ) # standardize the low and high frequency value for specified classes - params['standardize_classs_names'] = params['standardize_classs_names_ip'].split(';') - for cc in params['standardize_classs_names']: - if cc in params['class_names']: + params["standardize_classs_names"] = params[ + "standardize_classs_names_ip" + ].split(";") + for cc in params["standardize_classs_names"]: + if cc in params["class_names"]: data_train = tu.standardize_low_freq(data_train, cc) else: - print(cc, 'not found') + print(cc, "not found") # train loader train_dataset = adl.AudioLoader(data_train, params, is_train=True) - train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=params['batch_size'], - shuffle=True, num_workers=params['num_workers'], pin_memory=True) - + train_loader = torch.utils.data.DataLoader( + train_dataset, + batch_size=params["batch_size"], + shuffle=True, + num_workers=params["num_workers"], + pin_memory=True, + ) # test set - print('\nTesting on:') + print("\nTesting on:") for tt in test_sets: - print(tt['ann_path']) - data_test, _, _ = tu.load_set_of_anns(test_sets, classes_to_ignore, params['events_of_interest'], params['convert_to_genus']) + print(tt["ann_path"]) + data_test, _, _ = tu.load_set_of_anns( + test_sets, + classes_to_ignore, + params["events_of_interest"], + params["convert_to_genus"], + ) data_train = tu.remove_dupes(data_train, data_test) test_dataset = adl.AudioLoader(data_test, params, is_train=False) # batch size of 1 because of variable file length - test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, - shuffle=False, num_workers=params['num_workers'], pin_memory=True) - + test_loader = torch.utils.data.DataLoader( + test_dataset, + batch_size=1, + shuffle=False, + num_workers=params["num_workers"], + pin_memory=True, + ) inputs_train = next(iter(train_loader)) # TODO remove params['ip_height'], this is just legacy - params['ip_height'] = int(params['spec_height']*params['resize_factor']) - print('\ntrain batch spec size :', inputs_train['spec'].shape) - print('class target size :', inputs_train['y_2d_classes'].shape) + params["ip_height"] = int(params["spec_height"] * params["resize_factor"]) + print("\ntrain batch spec size :", inputs_train["spec"].shape) + print("class target size :", inputs_train["y_2d_classes"].shape) # select network model = select_model(params) - model = model.to(params['device']) + model = model.to(params["device"]) - optimizer = torch.optim.Adam(model.parameters(), lr=params['lr']) - #optimizer = torch.optim.SGD(model.parameters(), lr=params['lr'], momentum=0.9) - scheduler = CosineAnnealingLR(optimizer, params['num_epochs'] * len(train_loader)) - if params['train_loss'] == 'mse': + optimizer = torch.optim.Adam(model.parameters(), lr=params["lr"]) + # optimizer = torch.optim.SGD(model.parameters(), lr=params['lr'], momentum=0.9) + scheduler = CosineAnnealingLR( + optimizer, params["num_epochs"] * len(train_loader) + ) + if params["train_loss"] == "mse": det_criterion = losses.mse_loss - elif params['train_loss'] == 'focal': + elif params["train_loss"] == "focal": det_criterion = losses.focal_loss # save parameters to file - with open(params['experiment'] + 'params.json', 'w') as da: + with open(params["experiment"] + "params.json", "w") as da: json.dump(params, da, indent=2, sort_keys=True) # plotting - train_plt_ls = pu.LossPlotter(params['experiment'] + 'train_loss.png', params['num_epochs']+1, - ['train_loss'], None, None, ['epoch', 'train_loss'], logy=True) - test_plt_ls = 
pu.LossPlotter(params['experiment'] + 'test_loss.png', params['num_epochs']+1, - ['test_loss'], None, None, ['epoch', 'test_loss'], logy=True) - test_plt = pu.LossPlotter(params['experiment'] + 'test.png', params['num_epochs']+1, - ['avg_prec', 'rec_at_x', 'avg_prec_class', 'file_acc', 'top_class'], [0,1], None, ['epoch', '']) - test_plt_class = pu.LossPlotter(params['experiment'] + 'test_avg_prec.png', params['num_epochs']+1, - params['class_names_short'], [0,1], params['class_names_short'], ['epoch', 'avg_prec']) - + train_plt_ls = pu.LossPlotter( + params["experiment"] + "train_loss.png", + params["num_epochs"] + 1, + ["train_loss"], + None, + None, + ["epoch", "train_loss"], + logy=True, + ) + test_plt_ls = pu.LossPlotter( + params["experiment"] + "test_loss.png", + params["num_epochs"] + 1, + ["test_loss"], + None, + None, + ["epoch", "test_loss"], + logy=True, + ) + test_plt = pu.LossPlotter( + params["experiment"] + "test.png", + params["num_epochs"] + 1, + ["avg_prec", "rec_at_x", "avg_prec_class", "file_acc", "top_class"], + [0, 1], + None, + ["epoch", ""], + ) + test_plt_class = pu.LossPlotter( + params["experiment"] + "test_avg_prec.png", + params["num_epochs"] + 1, + params["class_names_short"], + [0, 1], + params["class_names_short"], + ["epoch", "avg_prec"], + ) # # main train loop - for epoch in range(0, params['num_epochs']+1): + for epoch in range(0, params["num_epochs"] + 1): - train_loss = train(model, epoch, train_loader, det_criterion, optimizer, scheduler, params) - train_plt_ls.update_and_save(epoch, [train_loss['train_loss']]) + train_loss = train( + model, + epoch, + train_loader, + det_criterion, + optimizer, + scheduler, + params, + ) + train_plt_ls.update_and_save(epoch, [train_loss["train_loss"]]) - if epoch % params['num_eval_epochs'] == 0: + if epoch % params["num_eval_epochs"] == 0: # detection accuracy on test set - test_res, test_loss = test(model, epoch, test_loader, det_criterion, params) - test_plt_ls.update_and_save(epoch, [test_loss['test_loss']]) - test_plt.update_and_save(epoch, [test_res['avg_prec'], test_res['rec_at_x'], - test_res['avg_prec_class'], test_res['file_acc'], test_res['top_class']['avg_prec']]) - test_plt_class.update_and_save(epoch, [rs['avg_prec'] for rs in test_res['class_pr']]) - pu.plot_pr_curve_class(params['experiment'] , 'test_pr', 'test_pr', test_res) - + test_res, test_loss = test( + model, epoch, test_loader, det_criterion, params + ) + test_plt_ls.update_and_save(epoch, [test_loss["test_loss"]]) + test_plt.update_and_save( + epoch, + [ + test_res["avg_prec"], + test_res["rec_at_x"], + test_res["avg_prec_class"], + test_res["file_acc"], + test_res["top_class"]["avg_prec"], + ], + ) + test_plt_class.update_and_save( + epoch, [rs["avg_prec"] for rs in test_res["class_pr"]] + ) + pu.plot_pr_curve_class( + params["experiment"], "test_pr", "test_pr", test_res + ) # save trained model - print('saving model to: ' + params['model_file_name']) - op_state = {'epoch': epoch + 1, - 'state_dict': model.state_dict(), - #'optimizer' : optimizer.state_dict(), - 'params' : params} - torch.save(op_state, params['model_file_name']) - + print("saving model to: " + params["model_file_name"]) + op_state = { + "epoch": epoch + 1, + "state_dict": model.state_dict(), + #'optimizer' : optimizer.state_dict(), + "params": params, + } + torch.save(op_state, params["model_file_name"]) # save an image with associated prediction for each batch in the test set - if not args['do_not_save_images']: - save_images_batch(model, test_loader, params) + # TODO: 
args variable does not exist + # if not args["do_not_save_images"]: + # save_images_batch(model, test_loader, params) diff --git a/bat_detect/train/train_split.py b/bat_detect/train/train_split.py index 20972bd..01b5c03 100644 --- a/bat_detect/train/train_split.py +++ b/bat_detect/train/train_split.py @@ -2,13 +2,14 @@ Run scripts/extract_anns.py to generate these json files. """ + def get_train_test_data(ann_dir, wav_dir, split_name, load_extra=True): - if split_name == 'diff': + if split_name == "diff": train_sets, test_sets = split_diff(ann_dir, wav_dir, load_extra) - elif split_name == 'same': + elif split_name == "same": train_sets, test_sets = split_same(ann_dir, wav_dir, load_extra) else: - print('Split not defined') + print("Split not defined") assert False return train_sets, test_sets @@ -18,73 +19,126 @@ def split_diff(ann_dir, wav_dir, load_extra=True): train_sets = [] if load_extra: - train_sets.append({'dataset_name': 'BatDetective', - 'is_test': False, - 'is_binary': True, # just a bat / not bat dataset ie no classes - 'ann_path': ann_dir + 'train_set_bulgaria_batdetective_with_bbs.json', - 'wav_path': wav_dir + 'bat_detective/audio/'}) - train_sets.append({'dataset_name': 'bat_logger_qeop_empty', - 'is_test': False, - 'is_binary': True, - 'ann_path': ann_dir + 'bat_logger_qeop_empty.json', - 'wav_path': wav_dir + 'bat_logger_qeop_empty/audio/'}) - train_sets.append({'dataset_name': 'bat_logger_2016_empty', - 'is_test': False, - 'is_binary': True, - 'ann_path': ann_dir + 'train_set_bat_logger_2016_empty.json', - 'wav_path': wav_dir + 'bat_logger_2016/audio/'}) + train_sets.append( + { + "dataset_name": "BatDetective", + "is_test": False, + "is_binary": True, # just a bat / not bat dataset ie no classes + "ann_path": ann_dir + + "train_set_bulgaria_batdetective_with_bbs.json", + "wav_path": wav_dir + "bat_detective/audio/", + } + ) + train_sets.append( + { + "dataset_name": "bat_logger_qeop_empty", + "is_test": False, + "is_binary": True, + "ann_path": ann_dir + "bat_logger_qeop_empty.json", + "wav_path": wav_dir + "bat_logger_qeop_empty/audio/", + } + ) + train_sets.append( + { + "dataset_name": "bat_logger_2016_empty", + "is_test": False, + "is_binary": True, + "ann_path": ann_dir + "train_set_bat_logger_2016_empty.json", + "wav_path": wav_dir + "bat_logger_2016/audio/", + } + ) # train_sets.append({'dataset_name': 'brazil_data_binary', # 'is_test': False, # 'ann_path': ann_dir + 'brazil_data_binary.json', # 'wav_path': wav_dir + 'brazil_data/audio/'}) - train_sets.append({'dataset_name': 'echobank', - 'is_test': False, - 'is_binary': False, - 'ann_path': ann_dir + 'Echobank_train_expert.json', - 'wav_path': wav_dir + 'echobank/audio/'}) - train_sets.append({'dataset_name': 'sn_scot_nor', - 'is_test': False, - 'is_binary': False, - 'ann_path': ann_dir + 'sn_scot_nor_0.5_expert.json', - 'wav_path': wav_dir + 'sn_scot_nor/audio/'}) - train_sets.append({'dataset_name': 'BCT_1_sec', - 'is_test': False, - 'is_binary': False, - 'ann_path': ann_dir + 'BCT_1_sec_train_expert.json', - 'wav_path': wav_dir + 'BCT_1_sec/audio/'}) - train_sets.append({'dataset_name': 'bcireland', - 'is_test': False, - 'is_binary': False, - 'ann_path': ann_dir + 'bcireland_expert.json', - 'wav_path': wav_dir + 'bcireland/audio/'}) - train_sets.append({'dataset_name': 'rhinolophus_steve_BCT', - 'is_test': False, - 'is_binary': False, - 'ann_path': ann_dir + 'rhinolophus_steve_BCT_expert.json', - 'wav_path': wav_dir + 'rhinolophus_steve_BCT/audio/'}) + train_sets.append( + { + "dataset_name": "echobank", + 
"is_test": False, + "is_binary": False, + "ann_path": ann_dir + "Echobank_train_expert.json", + "wav_path": wav_dir + "echobank/audio/", + } + ) + train_sets.append( + { + "dataset_name": "sn_scot_nor", + "is_test": False, + "is_binary": False, + "ann_path": ann_dir + "sn_scot_nor_0.5_expert.json", + "wav_path": wav_dir + "sn_scot_nor/audio/", + } + ) + train_sets.append( + { + "dataset_name": "BCT_1_sec", + "is_test": False, + "is_binary": False, + "ann_path": ann_dir + "BCT_1_sec_train_expert.json", + "wav_path": wav_dir + "BCT_1_sec/audio/", + } + ) + train_sets.append( + { + "dataset_name": "bcireland", + "is_test": False, + "is_binary": False, + "ann_path": ann_dir + "bcireland_expert.json", + "wav_path": wav_dir + "bcireland/audio/", + } + ) + train_sets.append( + { + "dataset_name": "rhinolophus_steve_BCT", + "is_test": False, + "is_binary": False, + "ann_path": ann_dir + "rhinolophus_steve_BCT_expert.json", + "wav_path": wav_dir + "rhinolophus_steve_BCT/audio/", + } + ) test_sets = [] - test_sets.append({'dataset_name': 'bat_data_martyn_2018', - 'is_test': True, - 'is_binary': False, - 'ann_path': ann_dir + 'BritishBatCalls_MartynCooke_2018_1_sec_train_expert.json', - 'wav_path': wav_dir + 'bat_data_martyn_2018/audio/'}) - test_sets.append({'dataset_name': 'bat_data_martyn_2018_test', - 'is_test': True, - 'is_binary': False, - 'ann_path': ann_dir + 'BritishBatCalls_MartynCooke_2018_1_sec_test_expert.json', - 'wav_path': wav_dir + 'bat_data_martyn_2018_test/audio/'}) - test_sets.append({'dataset_name': 'bat_data_martyn_2019', - 'is_test': True, - 'is_binary': False, - 'ann_path': ann_dir + 'BritishBatCalls_MartynCooke_2019_1_sec_train_expert.json', - 'wav_path': wav_dir + 'bat_data_martyn_2019/audio/'}) - test_sets.append({'dataset_name': 'bat_data_martyn_2019_test', - 'is_test': True, - 'is_binary': False, - 'ann_path': ann_dir + 'BritishBatCalls_MartynCooke_2019_1_sec_test_expert.json', - 'wav_path': wav_dir + 'bat_data_martyn_2019_test/audio/'}) + test_sets.append( + { + "dataset_name": "bat_data_martyn_2018", + "is_test": True, + "is_binary": False, + "ann_path": ann_dir + + "BritishBatCalls_MartynCooke_2018_1_sec_train_expert.json", + "wav_path": wav_dir + "bat_data_martyn_2018/audio/", + } + ) + test_sets.append( + { + "dataset_name": "bat_data_martyn_2018_test", + "is_test": True, + "is_binary": False, + "ann_path": ann_dir + + "BritishBatCalls_MartynCooke_2018_1_sec_test_expert.json", + "wav_path": wav_dir + "bat_data_martyn_2018_test/audio/", + } + ) + test_sets.append( + { + "dataset_name": "bat_data_martyn_2019", + "is_test": True, + "is_binary": False, + "ann_path": ann_dir + + "BritishBatCalls_MartynCooke_2019_1_sec_train_expert.json", + "wav_path": wav_dir + "bat_data_martyn_2019/audio/", + } + ) + test_sets.append( + { + "dataset_name": "bat_data_martyn_2019_test", + "is_test": True, + "is_binary": False, + "ann_path": ann_dir + + "BritishBatCalls_MartynCooke_2019_1_sec_test_expert.json", + "wav_path": wav_dir + "bat_data_martyn_2019_test/audio/", + } + ) return train_sets, test_sets @@ -93,71 +147,124 @@ def split_same(ann_dir, wav_dir, load_extra=True): train_sets = [] if load_extra: - train_sets.append({'dataset_name': 'BatDetective', - 'is_test': False, - 'is_binary': True, - 'ann_path': ann_dir + 'train_set_bulgaria_batdetective_with_bbs.json', - 'wav_path': wav_dir + 'bat_detective/audio/'}) - train_sets.append({'dataset_name': 'bat_logger_qeop_empty', - 'is_test': False, - 'is_binary': True, - 'ann_path': ann_dir + 'bat_logger_qeop_empty.json', - 'wav_path': 
wav_dir + 'bat_logger_qeop_empty/audio/'}) - train_sets.append({'dataset_name': 'bat_logger_2016_empty', - 'is_test': False, - 'is_binary': True, - 'ann_path': ann_dir + 'train_set_bat_logger_2016_empty.json', - 'wav_path': wav_dir + 'bat_logger_2016/audio/'}) + train_sets.append( + { + "dataset_name": "BatDetective", + "is_test": False, + "is_binary": True, + "ann_path": ann_dir + + "train_set_bulgaria_batdetective_with_bbs.json", + "wav_path": wav_dir + "bat_detective/audio/", + } + ) + train_sets.append( + { + "dataset_name": "bat_logger_qeop_empty", + "is_test": False, + "is_binary": True, + "ann_path": ann_dir + "bat_logger_qeop_empty.json", + "wav_path": wav_dir + "bat_logger_qeop_empty/audio/", + } + ) + train_sets.append( + { + "dataset_name": "bat_logger_2016_empty", + "is_test": False, + "is_binary": True, + "ann_path": ann_dir + "train_set_bat_logger_2016_empty.json", + "wav_path": wav_dir + "bat_logger_2016/audio/", + } + ) # train_sets.append({'dataset_name': 'brazil_data_binary', # 'is_test': False, # 'ann_path': ann_dir + 'brazil_data_binary.json', # 'wav_path': wav_dir + 'brazil_data/audio/'}) - train_sets.append({'dataset_name': 'echobank', - 'is_test': False, - 'is_binary': False, - 'ann_path': ann_dir + 'Echobank_train_expert_TRAIN.json', - 'wav_path': wav_dir + 'echobank/audio/'}) - train_sets.append({'dataset_name': 'sn_scot_nor', - 'is_test': False, - 'is_binary': False, - 'ann_path': ann_dir + 'sn_scot_nor_0.5_expert_TRAIN.json', - 'wav_path': wav_dir + 'sn_scot_nor/audio/'}) - train_sets.append({'dataset_name': 'BCT_1_sec', - 'is_test': False, - 'is_binary': False, - 'ann_path': ann_dir + 'BCT_1_sec_train_expert_TRAIN.json', - 'wav_path': wav_dir + 'BCT_1_sec/audio/'}) - train_sets.append({'dataset_name': 'bcireland', - 'is_test': False, - 'is_binary': False, - 'ann_path': ann_dir + 'bcireland_expert_TRAIN.json', - 'wav_path': wav_dir + 'bcireland/audio/'}) - train_sets.append({'dataset_name': 'rhinolophus_steve_BCT', - 'is_test': False, - 'is_binary': False, - 'ann_path': ann_dir + 'rhinolophus_steve_BCT_expert_TRAIN.json', - 'wav_path': wav_dir + 'rhinolophus_steve_BCT/audio/'}) - train_sets.append({'dataset_name': 'bat_data_martyn_2018', - 'is_test': False, - 'is_binary': False, - 'ann_path': ann_dir + 'BritishBatCalls_MartynCooke_2018_1_sec_train_expert_TRAIN.json', - 'wav_path': wav_dir + 'bat_data_martyn_2018/audio/'}) - train_sets.append({'dataset_name': 'bat_data_martyn_2018_test', - 'is_test': False, - 'is_binary': False, - 'ann_path': ann_dir + 'BritishBatCalls_MartynCooke_2018_1_sec_test_expert_TRAIN.json', - 'wav_path': wav_dir + 'bat_data_martyn_2018_test/audio/'}) - train_sets.append({'dataset_name': 'bat_data_martyn_2019', - 'is_test': False, - 'is_binary': False, - 'ann_path': ann_dir + 'BritishBatCalls_MartynCooke_2019_1_sec_train_expert_TRAIN.json', - 'wav_path': wav_dir + 'bat_data_martyn_2019/audio/'}) - train_sets.append({'dataset_name': 'bat_data_martyn_2019_test', - 'is_test': False, - 'is_binary': False, - 'ann_path': ann_dir + 'BritishBatCalls_MartynCooke_2019_1_sec_test_expert_TRAIN.json', - 'wav_path': wav_dir + 'bat_data_martyn_2019_test/audio/'}) + train_sets.append( + { + "dataset_name": "echobank", + "is_test": False, + "is_binary": False, + "ann_path": ann_dir + "Echobank_train_expert_TRAIN.json", + "wav_path": wav_dir + "echobank/audio/", + } + ) + train_sets.append( + { + "dataset_name": "sn_scot_nor", + "is_test": False, + "is_binary": False, + "ann_path": ann_dir + "sn_scot_nor_0.5_expert_TRAIN.json", + "wav_path": wav_dir + 
"sn_scot_nor/audio/", + } + ) + train_sets.append( + { + "dataset_name": "BCT_1_sec", + "is_test": False, + "is_binary": False, + "ann_path": ann_dir + "BCT_1_sec_train_expert_TRAIN.json", + "wav_path": wav_dir + "BCT_1_sec/audio/", + } + ) + train_sets.append( + { + "dataset_name": "bcireland", + "is_test": False, + "is_binary": False, + "ann_path": ann_dir + "bcireland_expert_TRAIN.json", + "wav_path": wav_dir + "bcireland/audio/", + } + ) + train_sets.append( + { + "dataset_name": "rhinolophus_steve_BCT", + "is_test": False, + "is_binary": False, + "ann_path": ann_dir + "rhinolophus_steve_BCT_expert_TRAIN.json", + "wav_path": wav_dir + "rhinolophus_steve_BCT/audio/", + } + ) + train_sets.append( + { + "dataset_name": "bat_data_martyn_2018", + "is_test": False, + "is_binary": False, + "ann_path": ann_dir + + "BritishBatCalls_MartynCooke_2018_1_sec_train_expert_TRAIN.json", + "wav_path": wav_dir + "bat_data_martyn_2018/audio/", + } + ) + train_sets.append( + { + "dataset_name": "bat_data_martyn_2018_test", + "is_test": False, + "is_binary": False, + "ann_path": ann_dir + + "BritishBatCalls_MartynCooke_2018_1_sec_test_expert_TRAIN.json", + "wav_path": wav_dir + "bat_data_martyn_2018_test/audio/", + } + ) + train_sets.append( + { + "dataset_name": "bat_data_martyn_2019", + "is_test": False, + "is_binary": False, + "ann_path": ann_dir + + "BritishBatCalls_MartynCooke_2019_1_sec_train_expert_TRAIN.json", + "wav_path": wav_dir + "bat_data_martyn_2019/audio/", + } + ) + train_sets.append( + { + "dataset_name": "bat_data_martyn_2019_test", + "is_test": False, + "is_binary": False, + "ann_path": ann_dir + + "BritishBatCalls_MartynCooke_2019_1_sec_test_expert_TRAIN.json", + "wav_path": wav_dir + "bat_data_martyn_2019_test/audio/", + } + ) # train_sets.append({'dataset_name': 'bat_data_martyn_2021_train', # 'is_test': False, @@ -171,51 +278,91 @@ def split_same(ann_dir, wav_dir, load_extra=True): # 'wav_path': wav_dir + 'volunteers_2021/audio/'}) test_sets = [] - test_sets.append({'dataset_name': 'echobank', - 'is_test': True, - 'is_binary': False, - 'ann_path': ann_dir + 'Echobank_train_expert_TEST.json', - 'wav_path': wav_dir + 'echobank/audio/'}) - test_sets.append({'dataset_name': 'sn_scot_nor', - 'is_test': True, - 'is_binary': False, - 'ann_path': ann_dir + 'sn_scot_nor_0.5_expert_TEST.json', - 'wav_path': wav_dir + 'sn_scot_nor/audio/'}) - test_sets.append({'dataset_name': 'BCT_1_sec', - 'is_test': True, - 'is_binary': False, - 'ann_path': ann_dir + 'BCT_1_sec_train_expert_TEST.json', - 'wav_path': wav_dir + 'BCT_1_sec/audio/'}) - test_sets.append({'dataset_name': 'bcireland', - 'is_test': True, - 'is_binary': False, - 'ann_path': ann_dir + 'bcireland_expert_TEST.json', - 'wav_path': wav_dir + 'bcireland/audio/'}) - test_sets.append({'dataset_name': 'rhinolophus_steve_BCT', - 'is_test': True, - 'is_binary': False, - 'ann_path': ann_dir + 'rhinolophus_steve_BCT_expert_TEST.json', - 'wav_path': wav_dir + 'rhinolophus_steve_BCT/audio/'}) - test_sets.append({'dataset_name': 'bat_data_martyn_2018', - 'is_test': True, - 'is_binary': False, - 'ann_path': ann_dir + 'BritishBatCalls_MartynCooke_2018_1_sec_train_expert_TEST.json', - 'wav_path': wav_dir + 'bat_data_martyn_2018/audio/'}) - test_sets.append({'dataset_name': 'bat_data_martyn_2018_test', - 'is_test': True, - 'is_binary': False, - 'ann_path': ann_dir + 'BritishBatCalls_MartynCooke_2018_1_sec_test_expert_TEST.json', - 'wav_path': wav_dir + 'bat_data_martyn_2018_test/audio/'}) - test_sets.append({'dataset_name': 'bat_data_martyn_2019', - 
'is_test': True, - 'is_binary': False, - 'ann_path': ann_dir + 'BritishBatCalls_MartynCooke_2019_1_sec_train_expert_TEST.json', - 'wav_path': wav_dir + 'bat_data_martyn_2019/audio/'}) - test_sets.append({'dataset_name': 'bat_data_martyn_2019_test', - 'is_test': True, - 'is_binary': False, - 'ann_path': ann_dir + 'BritishBatCalls_MartynCooke_2019_1_sec_test_expert_TEST.json', - 'wav_path': wav_dir + 'bat_data_martyn_2019_test/audio/'}) + test_sets.append( + { + "dataset_name": "echobank", + "is_test": True, + "is_binary": False, + "ann_path": ann_dir + "Echobank_train_expert_TEST.json", + "wav_path": wav_dir + "echobank/audio/", + } + ) + test_sets.append( + { + "dataset_name": "sn_scot_nor", + "is_test": True, + "is_binary": False, + "ann_path": ann_dir + "sn_scot_nor_0.5_expert_TEST.json", + "wav_path": wav_dir + "sn_scot_nor/audio/", + } + ) + test_sets.append( + { + "dataset_name": "BCT_1_sec", + "is_test": True, + "is_binary": False, + "ann_path": ann_dir + "BCT_1_sec_train_expert_TEST.json", + "wav_path": wav_dir + "BCT_1_sec/audio/", + } + ) + test_sets.append( + { + "dataset_name": "bcireland", + "is_test": True, + "is_binary": False, + "ann_path": ann_dir + "bcireland_expert_TEST.json", + "wav_path": wav_dir + "bcireland/audio/", + } + ) + test_sets.append( + { + "dataset_name": "rhinolophus_steve_BCT", + "is_test": True, + "is_binary": False, + "ann_path": ann_dir + "rhinolophus_steve_BCT_expert_TEST.json", + "wav_path": wav_dir + "rhinolophus_steve_BCT/audio/", + } + ) + test_sets.append( + { + "dataset_name": "bat_data_martyn_2018", + "is_test": True, + "is_binary": False, + "ann_path": ann_dir + + "BritishBatCalls_MartynCooke_2018_1_sec_train_expert_TEST.json", + "wav_path": wav_dir + "bat_data_martyn_2018/audio/", + } + ) + test_sets.append( + { + "dataset_name": "bat_data_martyn_2018_test", + "is_test": True, + "is_binary": False, + "ann_path": ann_dir + + "BritishBatCalls_MartynCooke_2018_1_sec_test_expert_TEST.json", + "wav_path": wav_dir + "bat_data_martyn_2018_test/audio/", + } + ) + test_sets.append( + { + "dataset_name": "bat_data_martyn_2019", + "is_test": True, + "is_binary": False, + "ann_path": ann_dir + + "BritishBatCalls_MartynCooke_2019_1_sec_train_expert_TEST.json", + "wav_path": wav_dir + "bat_data_martyn_2019/audio/", + } + ) + test_sets.append( + { + "dataset_name": "bat_data_martyn_2019_test", + "is_test": True, + "is_binary": False, + "ann_path": ann_dir + + "BritishBatCalls_MartynCooke_2019_1_sec_test_expert_TEST.json", + "wav_path": wav_dir + "bat_data_martyn_2019_test/audio/", + } + ) # test_sets.append({'dataset_name': 'bat_data_martyn_2021_test', # 'is_test': True, diff --git a/bat_detect/train/train_utils.py b/bat_detect/train/train_utils.py index cff92e4..62441a7 100644 --- a/bat_detect/train/train_utils.py +++ b/bat_detect/train/train_utils.py @@ -1,42 +1,52 @@ -import numpy as np -import random -import os import glob import json +import os +import random + +import numpy as np def write_notes_file(file_name, text): - with open(file_name, 'a') as da: - da.write(text + '\n') + with open(file_name, "a") as da: + da.write(text + "\n") def get_blank_dataset_dict(dataset_name, is_test, ann_path, wav_path): - ddict = {'dataset_name': dataset_name, 'is_test': is_test, 'is_binary': False, - 'ann_path': ann_path, 'wav_path': wav_path} + ddict = { + "dataset_name": dataset_name, + "is_test": is_test, + "is_binary": False, + "ann_path": ann_path, + "wav_path": wav_path, + } return ddict def get_short_class_names(class_names, str_len=3): class_names_short = [] 
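    # e.g. with the default str_len=3, "Myotis daubentonii" -> "Myo dau"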
    for cc in class_names:
-        class_names_short.append(' '.join([sp[:str_len] for sp in cc.split(' ')]))
+        class_names_short.append(
+            " ".join([sp[:str_len] for sp in cc.split(" ")])
+        )
    return class_names_short


def remove_dupes(data_train, data_test):
-    test_ids = [dd['id'] for dd in data_test]
+    test_ids = [dd["id"] for dd in data_test]
    data_train_prune = []
    for aa in data_train:
-        if aa['id'] not in test_ids:
+        if aa["id"] not in test_ids:
            data_train_prune.append(aa)
    diff = len(data_train) - len(data_train_prune)
    if diff != 0:
-        print(diff, 'items removed from train set')
+        print(diff, "items removed from train set")
    return data_train_prune


def get_genus_mapping(class_names):
-    genus_names, genus_mapping = np.unique([cc.split(' ')[0] for cc in class_names], return_inverse=True)
+    genus_names, genus_mapping = np.unique(
+        [cc.split(" ")[0] for cc in class_names], return_inverse=True
+    )
    return genus_names.tolist(), genus_mapping.tolist()


@@ -47,97 +57,110 @@ def standardize_low_freq(data, class_of_interest):
    low_freqs = []
    high_freqs = []
    for dd in data:
-        for aa in dd['annotation']:
-            if aa['class'] == class_of_interest:
-                low_freqs.append(aa['low_freq'])
-                high_freqs.append(aa['high_freq'])
+        for aa in dd["annotation"]:
+            if aa["class"] == class_of_interest:
+                low_freqs.append(aa["low_freq"])
+                high_freqs.append(aa["high_freq"])

    low_mean = np.mean(low_freqs)
    high_mean = np.mean(high_freqs)
-    assert(low_mean < high_mean)
+    assert low_mean < high_mean

-    print('\nStandardizing low and high frequency for:')
+    print("\nStandardizing low and high frequency for:")
    print(class_of_interest)
-    print('low: ', round(low_mean, 2))
-    print('high: ', round(high_mean, 2))
+    print("low: ", round(low_mean, 2))
+    print("high: ", round(high_mean, 2))

    # only set the low freq, high stays the same
    # assumes that low_mean < high_mean
    for dd in data:
-        for aa in dd['annotation']:
-            if aa['class'] == class_of_interest:
-                aa['low_freq'] = low_mean
-                if aa['high_freq'] < low_mean:
-                    aa['high_freq'] = high_mean
+        for aa in dd["annotation"]:
+            if aa["class"] == class_of_interest:
+                aa["low_freq"] = low_mean
+                if aa["high_freq"] < low_mean:
+                    aa["high_freq"] = high_mean

    return data


-def load_set_of_anns(data, classes_to_ignore=[], events_of_interest=None,
-                     convert_to_genus=False, verbose=True, list_of_anns=False,
-                     filter_issues=False, name_replace=False):
+def load_set_of_anns(
+    data,
+    classes_to_ignore=[],
+    events_of_interest=None,
+    convert_to_genus=False,
+    verbose=True,
+    list_of_anns=False,
+    filter_issues=False,
+    name_replace=False,
+):

    # load the annotations
    anns = []
    if list_of_anns:
        # path to list of individual json files
-        anns.extend(load_anns_from_path(data['ann_path'], data['wav_path']))
+        anns.extend(load_anns_from_path(data["ann_path"], data["wav_path"]))
    else:
        # dictionary of datasets
        for dd in data:
-            anns.extend(load_anns(dd['ann_path'], dd['wav_path']))
+            anns.extend(load_anns(dd["ann_path"], dd["wav_path"]))

    # discarding unannotated files
-    anns = [aa for aa in anns if aa['annotated'] is True]
+    anns = [aa for aa in anns if aa["annotated"] is True]

    # filter files that have annotation issues - if the input is a dictionary of
    # datasets, this will likely have already been done
    if filter_issues:
-        anns = [aa for aa in anns if aa['issues'] is False]
+        anns = [aa for aa in anns if aa["issues"] is False]

    # check for some basic formatting errors with class names
    for ann in anns:
-        for aa in ann['annotation']:
-            aa['class'] = aa['class'].strip()
+        for aa in ann["annotation"]:
+            aa["class"] = 
aa["class"].strip() # only load specified events - i.e. types of calls if events_of_interest is not None: for ann in anns: filtered_events = [] - for aa in ann['annotation']: - if aa['event'] in events_of_interest: + for aa in ann["annotation"]: + if aa["event"] in events_of_interest: filtered_events.append(aa) - ann['annotation'] = filtered_events + ann["annotation"] = filtered_events # change class names # replace_names will be a dictionary mapping input name to output if type(name_replace) is dict: for ann in anns: - for aa in ann['annotation']: - if aa['class'] in name_replace: - aa['class'] = name_replace[aa['class']] + for aa in ann["annotation"]: + if aa["class"] in name_replace: + aa["class"] = name_replace[aa["class"]] # convert everything to genus name if convert_to_genus: for ann in anns: - for aa in ann['annotation']: - aa['class'] = aa['class'].split(' ')[0] + for aa in ann["annotation"]: + aa["class"] = aa["class"].split(" ")[0] # get unique class names class_names_all = [] for ann in anns: - for aa in ann['annotation']: - if aa['class'] not in classes_to_ignore: - class_names_all.append(aa['class']) + for aa in ann["annotation"]: + if aa["class"] not in classes_to_ignore: + class_names_all.append(aa["class"]) class_names, class_cnts = np.unique(class_names_all, return_counts=True) - class_inv_freq = (class_cnts.sum() / (len(class_names) * class_cnts.astype(np.float32))) + class_inv_freq = class_cnts.sum() / ( + len(class_names) * class_cnts.astype(np.float32) + ) if verbose: - print('Class count:') + print("Class count:") str_len = np.max([len(cc) for cc in class_names]) + 5 for cc in range(len(class_names)): - print(str(cc).ljust(5) + class_names[cc].ljust(str_len) + str(class_cnts[cc])) + print( + str(cc).ljust(5) + + class_names[cc].ljust(str_len) + + str(class_cnts[cc]) + ) if len(classes_to_ignore) == 0: return anns @@ -150,36 +173,37 @@ def load_anns(ann_file_name, raw_audio_dir): anns = json.load(da) for aa in anns: - aa['file_path'] = raw_audio_dir + aa['id'] + aa["file_path"] = raw_audio_dir + aa["id"] return anns def load_anns_from_path(ann_file_dir, raw_audio_dir): - files = glob.glob(ann_file_dir + '*.json') + files = glob.glob(ann_file_dir + "*.json") anns = [] for ff in files: with open(ff) as da: ann = json.load(da) - ann['file_path'] = raw_audio_dir + ann['id'] + ann["file_path"] = raw_audio_dir + ann["id"] anns.append(ann) return anns class AverageMeter(object): - """Computes and stores the average and current value""" - def __init__(self): - self.reset() + """Computes and stores the average and current value""" - def reset(self): - self.val = 0 - self.avg = 0 - self.sum = 0 - self.count = 0 + def __init__(self): + self.reset() - def update(self, val, n=1): - self.val = val - self.sum += val * n - self.count += n - self.avg = self.sum / self.count + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count diff --git a/bat_detect/types.py b/bat_detect/types.py new file mode 100644 index 0000000..3bc810b --- /dev/null +++ b/bat_detect/types.py @@ -0,0 +1,475 @@ +"""Types used in the code base.""" +from typing import List, NamedTuple, Optional + +import numpy as np +import torch + +try: + from typing import TypedDict +except ImportError: + from typing_extensions import TypedDict + + +try: + from typing import Protocol +except ImportError: + from typing_extensions import Protocol + + +try: + from typing import 
NotRequired +except ImportError: + from typing_extensions import NotRequired + + +__all__ = [ + "Annotation", + "DetectionModel", + "FileAnnotations", + "ModelOutput", + "ModelParameters", + "NonMaximumSuppressionConfig", + "PredictionResults", + "ProcessingConfiguration", + "ResultParams", + "RunResults", + "SpectrogramParameters", +] + + +class SpectrogramParameters(TypedDict): + """Parameters for generating spectrograms.""" + + fft_win_length: float + """Length of the FFT window in seconds.""" + + fft_overlap: float + """Percentage of overlap between FFT windows.""" + + spec_height: int + """Height of the spectrogram in pixels.""" + + resize_factor: float + """Factor to resize the spectrogram by.""" + + spec_divide_factor: int + """Factor to divide the spectrogram by.""" + + max_freq: int + """Maximum frequency to display in the spectrogram.""" + + min_freq: int + """Minimum frequency to display in the spectrogram.""" + + spec_scale: str + """Scale to use for the spectrogram.""" + + denoise_spec_avg: bool + """Whether to denoise the spectrogram by averaging.""" + + max_scale_spec: bool + """Whether to scale the spectrogram so that its max is 1.""" + + +class ModelParameters(TypedDict): + """Model parameters.""" + + model_name: str + """Model name.""" + + num_filters: int + """Number of filters.""" + + emb_dim: int + """Embedding dimension.""" + + ip_height: int + """Input height in pixels.""" + + resize_factor: float + """Resize factor.""" + + class_names: List[str] + """Class names. The model is trained to detect these classes.""" + + +DictWithClass = TypedDict("DictWithClass", {"class": str}) + + +class Annotation(DictWithClass): + """Format of annotations. + + This is the format of a single annotation as expected by the annotation + tool. + """ + + start_time: float + """Start time in seconds.""" + + end_time: float + """End time in seconds.""" + + low_freq: int + """Low frequency in Hz.""" + + high_freq: int + """High frequency in Hz.""" + + class_prob: float + """Probability of class assignment.""" + + det_prob: float + """Probability of detection.""" + + individual: str + """Individual ID.""" + + event: str + """Type of detected event.""" + + +class FileAnnotations(TypedDict): + """Format of results. + + This is the format of the results expected by the annotation tool. 
+ """ + + id: str + """File ID.""" + + annotated: bool + """Whether file has been annotated.""" + + duration: float + """Duration of audio file.""" + + issues: bool + """Whether file has issues.""" + + time_exp: float + """Time expansion factor.""" + + class_name: str + """Class predicted at file level""" + + notes: str + """Notes of file.""" + + annotation: List[Annotation] + """List of annotations.""" + + +class RunResults(TypedDict): + """Run results.""" + + pred_dict: FileAnnotations + """Predictions in the format expected by the annotation tool.""" + + spec_feats: NotRequired[List[np.ndarray]] + """Spectrogram features.""" + + spec_feat_names: NotRequired[List[str]] + """Spectrogram feature names.""" + + cnn_feats: NotRequired[List[np.ndarray]] + """CNN features.""" + + cnn_feat_names: NotRequired[List[str]] + """CNN feature names.""" + + spec_slices: NotRequired[List[np.ndarray]] + """Spectrogram slices.""" + + +class ResultParams(TypedDict): + """Result parameters.""" + + class_names: List[str] + """Class names.""" + + spec_features: bool + """Whether to return spectrogram features.""" + + cnn_features: bool + """Whether to return CNN features.""" + + spec_slices: bool + """Whether to return spectrogram slices.""" + + +class ProcessingConfiguration(TypedDict): + """Parameters for processing audio files.""" + + # audio parameters + target_samp_rate: int + """Target sampling rate of the audio.""" + + fft_win_length: float + """Length of the FFT window in seconds.""" + + fft_overlap: float + """Length of the FFT window in samples.""" + + resize_factor: float + """Factor to resize the spectrogram by.""" + + spec_divide_factor: int + """Factor to divide the spectrogram by.""" + + spec_height: int + """Height of the spectrogram in pixels.""" + + spec_scale: str + """Scale to use for the spectrogram.""" + + denoise_spec_avg: bool + """Whether to denoise the spectrogram by averaging.""" + + max_scale_spec: bool + """Whether to scale the spectrogram so that its max is 1.""" + + scale_raw_audio: bool + """Whether to scale the raw audio to be between -1 and 1.""" + + class_names: List[str] + """Names of the classes the model can detect.""" + + detection_threshold: float + """Threshold for detection probability.""" + + time_expansion: Optional[float] + """Time expansion factor of the processed recordings.""" + + top_n: int + """Number of top detections to keep.""" + + return_raw_preds: bool + """Whether to return raw predictions.""" + + max_duration: Optional[float] + """Maximum duration of audio file to process in seconds.""" + + nms_kernel_size: int + """Size of the kernel for non-maximum suppression.""" + + max_freq: int + """Maximum frequency to consider in Hz.""" + + min_freq: int + """Minimum frequency to consider in Hz.""" + + nms_top_k_per_sec: float + """Number of top detections to keep per second.""" + + quiet: bool + """Whether to suppress output.""" + + chunk_size: float + """Size of chunks to process in seconds.""" + + cnn_features: bool + """Whether to return CNN features.""" + + spec_features: bool + """Whether to return spectrogram features.""" + + spec_slices: bool + """Whether to return spectrogram slices.""" + + +class ModelOutput(NamedTuple): + """Output of the detection model. + + Each of the tensors has a shape of + + `(batch_size, num_channels,spec_height, spec_width)`. + + Where `spec_height` and `spec_width` are the height and width of the + input spectrograms. + + They contain localised information of: + + 1. 
The probability of a bounding box detection at the given location. + 2. The predicted size of the bounding box at the given location. + 3. The probabilities of each class at the given location. + 4. Same as 3. but before softmax. + 5. Features used to make the predictions at the given location. + """ + + pred_det: torch.Tensor + """Tensor with predicted detection probabilities.""" + + pred_size: torch.Tensor + """Tensor with predicted bounding box sizes.""" + + pred_class: torch.Tensor + """Tensor with predicted class probabilities.""" + + pred_class_un_norm: torch.Tensor + """Tensor with predicted class probabilities before softmax.""" + + features: torch.Tensor + """Tensor with intermediate features.""" + + +class PredictionResults(TypedDict): + """Results of the prediction. + + Each key maps to a numpy array of length `num_detections` containing + the corresponding value for each detection. + """ + + det_probs: np.ndarray + """Detection probabilities.""" + + x_pos: np.ndarray + """X position of the detection in pixels.""" + + y_pos: np.ndarray + """Y position of the detection in pixels.""" + + bb_width: np.ndarray + """Width of the detection in pixels.""" + + bb_height: np.ndarray + """Height of the detection in pixels.""" + + start_times: np.ndarray + """Start times of the detections in seconds.""" + + end_times: np.ndarray + """End times of the detections in seconds.""" + + low_freqs: np.ndarray + """Low frequencies of the detections in Hz.""" + + high_freqs: np.ndarray + """High frequencies of the detections in Hz.""" + + class_probs: np.ndarray + """Class probabilities.""" + + +class DetectionModel(Protocol): + """Protocol for detection models. + + This protocol is used to define the interface for the detection models. + This allows us to use the same code for training and inference, even + though the models are different. + """ + + num_classes: int + """Number of classes the model can classify.""" + + emb_dim: int + """Dimension of the embedding vector.""" + + num_filts: int + """Number of filters in the model.""" + + resize_factor: float + """Factor by which the input is resized.""" + + ip_height_rs: int + """Height of the input image after resizing.""" + + def forward( + self, + ip: torch.Tensor, + return_feats: bool = False, + ) -> ModelOutput: + """Forward pass of the model.""" + ... + + def __call__( + self, + ip: torch.Tensor, + return_feats: bool = False, + ) -> ModelOutput: + """Forward pass of the model.""" + ... 
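+ + # Illustrative sketch (hypothetical, not part of the BatDetect2 API; kept +# as a comment so it has no runtime effect): any object exposing the +# attributes above and returning a `ModelOutput` from `forward`/`__call__` +# satisfies this protocol, without inheriting from a common base class: +# +# class _DummyModel(torch.nn.Module): +#     num_classes = 2 +#     emb_dim = 32 +#     num_filts = 16 +#     resize_factor = 0.5 +#     ip_height_rs = 128 +# +#     def forward(self, ip, return_feats=False): +#         batch, _, height, width = ip.shape +#         probs = torch.zeros(batch, self.num_classes, height, width) +#         return ModelOutput( +#             pred_det=torch.zeros(batch, 1, height, width), +#             pred_size=torch.zeros(batch, 2, height, width), +#             pred_class=probs, +#             pred_class_un_norm=probs, +#             features=torch.zeros(batch, self.emb_dim, height, width), +#         ) +# +# model: DetectionModel = _DummyModel()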
+ + +class NonMaximumSuppressionConfig(TypedDict): + """Configuration for non-maximum suppression.""" + + nms_kernel_size: int + """Size of the kernel for non-maximum suppression.""" + + max_freq: int + """Maximum frequency to consider in Hz.""" + + min_freq: int + """Minimum frequency to consider in Hz.""" + + fft_win_length: float + """Length of the FFT window in seconds.""" + + fft_overlap: float + """Percentage of overlap between FFT windows.""" + + resize_factor: float + """Factor by which the input was resized.""" + + nms_top_k_per_sec: float + """Number of top detections to keep per second.""" + + detection_threshold: float + """Threshold for detection probability.""" + + +class HeatmapParameters(TypedDict): + """Parameters that control the heatmap generation function.""" + + class_names: List[str] + """Names of the classes the model can detect.""" + + fft_win_length: float + """Length of the FFT window in seconds.""" + + fft_overlap: float + """Percentage of overlap between FFT windows.""" + + resize_factor: float + """Factor by which the input was resized.""" + + min_freq: int + """Minimum frequency to consider in Hz.""" + + max_freq: int + """Maximum frequency to consider in Hz.""" + + target_sigma: float + """Sigma for the Gaussian kernel. Controls the width of the points in + the heatmap.""" + + +class AnnotationGroup(TypedDict): + """Group of annotations. + + Each key maps to a numpy array of length `num_annotations` containing + the corresponding value for each annotation. + """ + + start_times: np.ndarray + """Start times of the annotations in seconds.""" + + end_times: np.ndarray + """End times of the annotations in seconds.""" + + low_freqs: np.ndarray + """Low frequencies of the annotations in Hz.""" + + high_freqs: np.ndarray + """High frequencies of the annotations in Hz.""" + + class_ids: np.ndarray + """Class IDs of the annotations.""" + + individual_ids: np.ndarray + """Individual IDs of the annotations.""" + + x_inds: NotRequired[np.ndarray] + """X coordinate of the annotations in the spectrogram.""" + + y_inds: NotRequired[np.ndarray] + """Y coordinate of the annotations in the spectrogram.""" diff --git a/bat_detect/utils/audio_utils.py b/bat_detect/utils/audio_utils.py index 4a18d74..ba12798 100644 --- a/bat_detect/utils/audio_utils.py +++ b/bat_detect/utils/audio_utils.py @@ -1,91 +1,207 @@ -import numpy as np -from . import wavfile import warnings -import torch +from typing import Optional, Tuple + import librosa +import librosa.core.spectrum +import numpy as np +import torch + +from bat_detect.detector.parameters import ( + DENOISE_SPEC_AVG, + DETECTION_THRESHOLD, + FFT_OVERLAP, + FFT_WIN_LENGTH_S, + MAX_FREQ_HZ, + MAX_SCALE_SPEC, + MIN_FREQ_HZ, + NMS_KERNEL_SIZE, + NMS_TOP_K_PER_SEC, + RESIZE_FACTOR, + SCALE_RAW_AUDIO, + SPEC_DIVIDE_FACTOR, + SPEC_HEIGHT, + SPEC_SCALE, +) + +from . 
import wavfile + +try: + from typing import TypedDict +except ImportError: + from typing_extensions import TypedDict + +__all__ = [ + "load_audio", + "generate_spectrogram", + "pad_audio", + "SpectrogramParameters", + "DEFAULT_SPECTROGRAM_PARAMETERS", +] def time_to_x_coords(time_in_file, sampling_rate, fft_win_length, fft_overlap): - nfft = np.floor(fft_win_length*sampling_rate) # int() uses floor - noverlap = np.floor(fft_overlap*nfft) - return (time_in_file*sampling_rate-noverlap) / (nfft - noverlap) + nfft = np.floor(fft_win_length * sampling_rate) # int() uses floor + noverlap = np.floor(fft_overlap * nfft) + return (time_in_file * sampling_rate - noverlap) / (nfft - noverlap) # NOTE this is also defined in post_process def x_coords_to_time(x_pos, sampling_rate, fft_win_length, fft_overlap): - nfft = np.floor(fft_win_length*sampling_rate) - noverlap = np.floor(fft_overlap*nfft) - return ((x_pos*(nfft - noverlap)) + noverlap) / sampling_rate - #return (1.0 - fft_overlap) * fft_win_length * (x_pos + 0.5) # 0.5 is for center of temporal window + nfft = np.floor(fft_win_length * sampling_rate) + noverlap = np.floor(fft_overlap * nfft) + return ((x_pos * (nfft - noverlap)) + noverlap) / sampling_rate + # return (1.0 - fft_overlap) * fft_win_length * (x_pos + 0.5) # 0.5 is for center of temporal window -def generate_spectrogram(audio, sampling_rate, params, return_spec_for_viz=False, check_spec_size=True): +def generate_spectrogram( + audio, + sampling_rate, + params, + return_spec_for_viz=False, + check_spec_size=True, +): # generate spectrogram - spec = gen_mag_spectrogram(audio, sampling_rate, params['fft_win_length'], params['fft_overlap']) + spec = gen_mag_spectrogram( + audio, + sampling_rate, + params["fft_win_length"], + params["fft_overlap"], + ) # crop to min/max freq - max_freq = round(params['max_freq']*params['fft_win_length']) - min_freq = round(params['min_freq']*params['fft_win_length']) + max_freq = round(params["max_freq"] * params["fft_win_length"]) + min_freq = round(params["min_freq"] * params["fft_win_length"]) if spec.shape[0] < max_freq: freq_pad = max_freq - spec.shape[0] - spec = np.vstack((np.zeros((freq_pad, spec.shape[1]), dtype=spec.dtype), spec)) - spec_cropped = spec[-max_freq:spec.shape[0]-min_freq, :] + spec = np.vstack( + (np.zeros((freq_pad, spec.shape[1]), dtype=spec.dtype), spec) + ) + spec_cropped = spec[-max_freq : spec.shape[0] - min_freq, :] - if params['spec_scale'] == 'log': - log_scaling = 2.0 * (1.0 / sampling_rate) * (1.0/(np.abs(np.hanning(int(params['fft_win_length']*sampling_rate)))**2).sum()) - #log_scaling = (1.0 / sampling_rate)*0.1 - #log_scaling = (1.0 / sampling_rate)*10e4 - spec = np.log1p(log_scaling*spec_cropped) - elif params['spec_scale'] == 'pcen': + if params["spec_scale"] == "log": + log_scaling = ( + 2.0 + * (1.0 / sampling_rate) + * ( + 1.0 + / ( + np.abs( + np.hanning( + int(params["fft_win_length"] * sampling_rate) + ) + ) + ** 2 + ).sum() + ) + ) + # log_scaling = (1.0 / sampling_rate)*0.1 + # log_scaling = (1.0 / sampling_rate)*10e4 + spec = np.log1p(log_scaling * spec_cropped) + elif params["spec_scale"] == "pcen": spec = pcen(spec_cropped, sampling_rate) - elif params['spec_scale'] == 'none': + + elif params["spec_scale"] == "none": pass - if params['denoise_spec_avg']: + if params["denoise_spec_avg"]: spec = spec - np.mean(spec, 1)[:, np.newaxis] spec.clip(min=0, out=spec) - if params['max_scale_spec']: + if params["max_scale_spec"]: spec = spec / (spec.max() + 10e-6) # needs to be divisible by specific factor - if not 
it should have been padded - #if check_spec_size: - #assert((int(spec.shape[0]*params['resize_factor']) % params['spec_divide_factor']) == 0) - #assert((int(spec.shape[1]*params['resize_factor']) % params['spec_divide_factor']) == 0) + # if check_spec_size: + # assert((int(spec.shape[0]*params['resize_factor']) % params['spec_divide_factor']) == 0) + # assert((int(spec.shape[1]*params['resize_factor']) % params['spec_divide_factor']) == 0) # for visualization purposes - use log scaled spectrogram if return_spec_for_viz: - log_scaling = 2.0 * (1.0 / sampling_rate) * (1.0/(np.abs(np.hanning(int(params['fft_win_length']*sampling_rate)))**2).sum()) - spec_for_viz = np.log1p(log_scaling*spec_cropped).astype(np.float32) + log_scaling = ( + 2.0 + * (1.0 / sampling_rate) + * ( + 1.0 + / ( + np.abs( + np.hanning( + int(params["fft_win_length"] * sampling_rate) + ) + ) + ** 2 + ).sum() + ) + ) + spec_for_viz = np.log1p(log_scaling * spec_cropped).astype(np.float32) else: spec_for_viz = None return spec, spec_for_viz -def load_audio_file(audio_file, time_exp_fact, target_samp_rate, scale=False, max_duration=False): +def load_audio( + audio_file: str, + time_exp_fact: float, + target_samp_rate: int, + scale: bool = False, + max_duration: Optional[float] = None, +) -> Tuple[int, np.ndarray]: + """Load an audio file and resample it to the target sampling rate. + + The audio can optionally be scaled to [-1, 1] and clipped to a maximum + duration. Only mono files are supported. + + Args: + audio_file (str): Path to the audio file. + time_exp_fact (float): Time expansion factor of the recording; the + native sampling rate is multiplied by this factor before + resampling. + target_samp_rate (int): Target sampling rate. + scale (bool): Whether to scale the audio to [-1, 1]. + max_duration (float, optional): Maximum duration of the audio in + seconds. + + Returns: + sampling_rate: The sampling rate of the audio. + audio_raw: The audio signal in a numpy array. + + Raises: + ValueError: If the audio file is stereo. 
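+ + Example: + Illustrative sketch only; "night_recording.wav" is a hypothetical + mono file and the target rate is an assumed value: + + >>> sampling_rate, audio = load_audio( + ...     "night_recording.wav", + ...     time_exp_fact=1.0, + ...     target_samp_rate=256000, + ... )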
+ + """ with warnings.catch_warnings(): - warnings.filterwarnings('ignore', category=wavfile.WavFileWarning) - #sampling_rate, audio_raw = wavfile.read(audio_file) - audio_raw, sampling_rate = librosa.load(audio_file, sr=None) + warnings.filterwarnings("ignore", category=wavfile.WavFileWarning) + # sampling_rate, audio_raw = wavfile.read(audio_file) + audio_raw, sampling_rate = librosa.load( + audio_file, + sr=None, + dtype=np.float32, + ) if len(audio_raw.shape) > 1: - raise Exception('Currently does not handle stereo files') + raise ValueError("Currently does not handle stereo files") + sampling_rate = sampling_rate * time_exp_fact # resample - need to do this after correcting for time expansion sampling_rate_old = sampling_rate sampling_rate = target_samp_rate - audio_raw = librosa.resample(audio_raw, orig_sr=sampling_rate_old, target_sr=sampling_rate, res_type='polyphase') + if sampling_rate_old != sampling_rate: + audio_raw = librosa.resample( + audio_raw, + orig_sr=sampling_rate_old, + target_sr=sampling_rate, + res_type="polyphase", + ) # clipping maximum duration - if max_duration is not False: - max_duration = np.minimum(int(sampling_rate*max_duration), audio_raw.shape[0]) + if max_duration is not None: + max_duration = int( + np.minimum( + int(sampling_rate * max_duration), + audio_raw.shape[0], + ) + ) audio_raw = audio_raw[:max_duration] - - # convert to float32 and scale - audio_raw = audio_raw.astype(np.float32) + + # scale to [-1, 1] if scale: audio_raw = audio_raw - audio_raw.mean() audio_raw = audio_raw / (np.abs(audio_raw).max() + 10e-6) @@ -93,38 +209,53 @@ def load_audio_file(audio_file, time_exp_fact, target_samp_rate, scale=False, ma return sampling_rate, audio_raw -def pad_audio(audio_raw, fs, ms, overlap_perc, resize_factor, divide_factor, fixed_width=None): +def pad_audio( + audio_raw, + fs, + ms, + overlap_perc, + resize_factor, + divide_factor, + fixed_width=None, +): # Adds zeros to the end of the raw data so that the generated sepctrogram # will be evenly divisible by `divide_factor` # Also deals with very short audio clips and fixed_width during training # This code could be clearer, clean up - nfft = int(ms*fs) - noverlap = int(overlap_perc*nfft) + nfft = int(ms * fs) + noverlap = int(overlap_perc * nfft) step = nfft - noverlap - min_size = int(divide_factor*(1.0/resize_factor)) - spec_width = ((audio_raw.shape[0]-noverlap)//step) + min_size = int(divide_factor * (1.0 / resize_factor)) + spec_width = (audio_raw.shape[0] - noverlap) // step spec_width_rs = spec_width * resize_factor if fixed_width is not None and spec_width < fixed_width: # too small # used during training to ensure all the batches are the same size - diff = fixed_width*step + noverlap - audio_raw.shape[0] - audio_raw = np.hstack((audio_raw, np.zeros(diff, dtype=audio_raw.dtype))) + diff = fixed_width * step + noverlap - audio_raw.shape[0] + audio_raw = np.hstack( + (audio_raw, np.zeros(diff, dtype=audio_raw.dtype)) + ) elif fixed_width is not None and spec_width > fixed_width: # too big # used during training to ensure all the batches are the same size - diff = fixed_width*step + noverlap - audio_raw.shape[0] + diff = fixed_width * step + noverlap - audio_raw.shape[0] audio_raw = audio_raw[:diff] - elif spec_width_rs < min_size or (np.floor(spec_width_rs) % divide_factor) != 0: + elif ( + spec_width_rs < min_size + or (np.floor(spec_width_rs) % divide_factor) != 0 + ): # need to be at least min_size div_amt = np.ceil(spec_width_rs / float(divide_factor)) div_amt = np.maximum(1, div_amt) - 
target_size = int(div_amt*divide_factor*(1.0/resize_factor)) - diff = target_size*step + noverlap - audio_raw.shape[0] - audio_raw = np.hstack((audio_raw, np.zeros(diff, dtype=audio_raw.dtype))) + target_size = int(div_amt * divide_factor * (1.0 / resize_factor)) + diff = target_size * step + noverlap - audio_raw.shape[0] + audio_raw = np.hstack( + (audio_raw, np.zeros(diff, dtype=audio_raw.dtype)) + ) return audio_raw @@ -133,14 +264,16 @@ def gen_mag_spectrogram(x, fs, ms, overlap_perc): # Computes magnitude spectrogram by specifying time. x = x.astype(np.float32) - nfft = int(ms*fs) - noverlap = int(overlap_perc*nfft) + nfft = int(ms * fs) + noverlap = int(overlap_perc * nfft) # window data step = nfft - noverlap # compute spec - spec, _ = librosa.core.spectrum._spectrogram(y=x, power=1, n_fft=nfft, hop_length=step, center=False) + spec, _ = librosa.core.spectrum._spectrogram( + y=x, power=1, n_fft=nfft, hop_length=step, center=False + ) # remove DC component and flip vertical orientation spec = np.flipud(spec[1:, :]) @@ -149,8 +282,8 @@ def gen_mag_spectrogram(x, fs, ms, overlap_perc): def gen_mag_spectrogram_pt(x, fs, ms, overlap_perc): - nfft = int(ms*fs) - nstep = round((1.0-overlap_perc)*nfft) + nfft = int(ms * fs) + nstep = round((1.0 - overlap_perc) * nfft) han_win = torch.hann_window(nfft, periodic=False).to(x.device) @@ -158,12 +291,14 @@ def gen_mag_spectrogram_pt(x, fs, ms, overlap_perc): spec = complex_spec.pow(2.0).sum(-1) # remove DC component and flip vertically - spec = torch.flipud(spec[0, 1:,:]) + spec = torch.flipud(spec[0, 1:, :]) return spec def pcen(spec_cropped, sampling_rate): # TODO should be passing hop_length too i.e. step - spec = librosa.pcen(spec_cropped * (2**31), sr=sampling_rate/10).astype(np.float32) + spec = librosa.pcen(spec_cropped * (2**31), sr=sampling_rate / 10).astype( + np.float32 + ) return spec diff --git a/bat_detect/utils/detector_utils.py b/bat_detect/utils/detector_utils.py index fef9828..cd71ee6 100644 --- a/bat_detect/utils/detector_utils.py +++ b/bat_detect/utils/detector_utils.py @@ -1,291 +1,811 @@ -import torch -import torch.nn.functional as F +import json import os +from typing import Any, Iterator, List, Optional, Tuple, Union + import numpy as np import pandas as pd -import json -import sys +import torch +import torch.nn.functional as F -from bat_detect.detector import models import bat_detect.detector.compute_features as feats import bat_detect.detector.post_process as pp import bat_detect.utils.audio_utils as au +from bat_detect.detector import models +from bat_detect.detector.parameters import DEFAULT_MODEL_PATH +from bat_detect.types import ( + Annotation, + DetectionModel, + FileAnnotations, + ModelOutput, + ModelParameters, + PredictionResults, + ProcessingConfiguration, + ResultParams, + RunResults, + SpectrogramParameters, +) + +__all__ = [ + "load_model", + "list_audio_files", + "format_single_result", + "save_results_to_file", + "iterate_over_chunks", + "process_spectrogram", + "process_audio_array", + "process_file", +] -def get_default_bd_args(): - args = {} - args['detection_threshold'] = 0.001 - args['time_expansion_factor'] = 1 - args['audio_dir'] = '' - args['ann_dir'] = '' - args['spec_slices'] = False - args['chunk_size'] = 3 - args['spec_features'] = False - args['cnn_features'] = False - args['quiet'] = True - args['save_preds_if_empty'] = True - args['ann_dir'] = os.path.join(args['ann_dir'], '') - return args - - -def get_audio_files(ip_dir): +def list_audio_files(ip_dir: str) -> List[str]: + """Get all audio 
files in directory. + Args: + ip_dir (str): Input directory. + + Returns: + list: List of audio files. Only .wav files are returned. Paths are + prefixed with ip_dir. + + """ matches = [] - for root, dirnames, filenames in os.walk(ip_dir): + for root, _, filenames in os.walk(ip_dir): for filename in filenames: - if filename.lower().endswith('.wav'): + if filename.lower().endswith(".wav"): matches.append(os.path.join(root, filename)) return matches -def load_model(model_path, load_weights=True): +def load_model( + model_path: str = DEFAULT_MODEL_PATH, + load_weights: bool = True, + device: Optional[torch.device] = None, +) -> Tuple[DetectionModel, ModelParameters]: + """Load model from file. - # load model - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - if os.path.isfile(model_path): - net_params = torch.load(model_path, map_location=device) + Args: + model_path (str): Path to model file. Defaults to DEFAULT_MODEL_PATH. + load_weights (bool, optional): Load weights. Defaults to True. + device (torch.device, optional): Device to load the model onto. + Defaults to CUDA if available, otherwise CPU. + + Returns: + model, params: Model and parameters. + + Raises: + FileNotFoundError: Model file not found. + ValueError: Unknown model name. + """ + if device is None: + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + if not os.path.isfile(model_path): + raise FileNotFoundError("Model file not found.") + + net_params = torch.load(model_path, map_location=device) + + params = net_params["params"] + + model: DetectionModel + + if params["model_name"] == "Net2DFast": + model = models.Net2DFast( + params["num_filters"], + num_classes=len(params["class_names"]), + emb_dim=params["emb_dim"], + ip_height=params["ip_height"], + resize_factor=params["resize_factor"], + ) + elif params["model_name"] == "Net2DFastNoAttn": + model = models.Net2DFastNoAttn( + params["num_filters"], + num_classes=len(params["class_names"]), + emb_dim=params["emb_dim"], + ip_height=params["ip_height"], + resize_factor=params["resize_factor"], + ) + elif params["model_name"] == "Net2DFastNoCoordConv": + model = models.Net2DFastNoCoordConv( + params["num_filters"], + num_classes=len(params["class_names"]), + emb_dim=params["emb_dim"], + ip_height=params["ip_height"], + resize_factor=params["resize_factor"], + ) else: - print('Error: model not found.') - sys.exit(1) - - params = net_params['params'] - params['device'] = device - - if params['model_name'] == 'Net2DFast': - model = models.Net2DFast(params['num_filters'], num_classes=len(params['class_names']), - emb_dim=params['emb_dim'], ip_height=params['ip_height'], - resize_factor=params['resize_factor']) - elif params['model_name'] == 'Net2DFastNoAttn': - model = models.Net2DFastNoAttn(params['num_filters'], num_classes=len(params['class_names']), - emb_dim=params['emb_dim'], ip_height=params['ip_height'], - resize_factor=params['resize_factor']) - elif params['model_name'] == 'Net2DFastNoCoordConv': - model = models.Net2DFastNoCoordConv(params['num_filters'], num_classes=len(params['class_names']), - emb_dim=params['emb_dim'], ip_height=params['ip_height'], - resize_factor=params['resize_factor']) - else: - print('Error: unknown model.') + raise ValueError("Unknown model.") if load_weights: - model.load_state_dict(net_params['state_dict']) + model.load_state_dict(net_params["state_dict"]) - model = model.to(params['device']) + model = model.to(device) model.eval() return model, params -def merge_results(predictions, spec_feats, cnn_feats, spec_slices): - +def 
_merge_results(predictions, spec_feats, cnn_feats, spec_slices): predictions_m = {} - num_preds = np.sum([len(pp['det_probs']) for pp in predictions]) + num_preds = np.sum([len(pp["det_probs"]) for pp in predictions]) if num_preds > 0: - for kk in predictions[0].keys(): - predictions_m[kk] = np.hstack([pp[kk] for pp in predictions if pp['det_probs'].shape[0] > 0]) + for key in predictions[0].keys(): + predictions_m[key] = np.hstack( + [pp[key] for pp in predictions if pp["det_probs"].shape[0] > 0] + ) else: - # hack in case where no detected calls as we need some of the key names in dict + # hack in case where no detected calls as we need some of the key + # names in dict predictions_m = predictions[0] if len(spec_feats) > 0: spec_feats = np.vstack(spec_feats) + if len(cnn_feats) > 0: cnn_feats = np.vstack(cnn_feats) + return predictions_m, spec_feats, cnn_feats, spec_slices -def convert_results(file_id, time_exp, duration, params, predictions, spec_feats, cnn_feats, spec_slices): +def get_annotations_from_preds( + predictions: PredictionResults, + class_names: List[str], +) -> List[Annotation]: + """Get list of annotations from predictions.""" + # Get the best class prediction probability and index for each detection + class_prob_best = predictions["class_probs"].max(0) + class_ind_best = predictions["class_probs"].argmax(0) - # create a single dictionary - this is the format used by the annotation tool - pred_dict = {} - pred_dict['id'] = file_id - pred_dict['annotated'] = False - pred_dict['issues'] = False - pred_dict['notes'] = 'Automatically generated.' - pred_dict['time_exp'] = time_exp - pred_dict['duration'] = round(duration, 4) - pred_dict['annotation'] = [] + # Pack the results into a list of dictionaries + annotations: List[Annotation] = [ + { + "start_time": round(float(start_time), 4), + "end_time": round(float(end_time), 4), + "low_freq": int(low_freq), + "high_freq": int(high_freq), + "class": str(class_names[class_index]), + "class_prob": round(float(class_prob), 3), + "det_prob": round(float(det_prob), 3), + "individual": "-1", + "event": "Echolocation", + } + for ( + start_time, + end_time, + low_freq, + high_freq, + class_index, + class_prob, + det_prob, + ) in zip( + predictions["start_times"], + predictions["end_times"], + predictions["low_freqs"], + predictions["high_freqs"], + class_ind_best, + class_prob_best, + predictions["det_probs"], + ) + ] + return annotations - class_prob_best = predictions['class_probs'].max(0) - class_ind_best = predictions['class_probs'].argmax(0) - class_overall = pp.overall_class_pred(predictions['det_probs'], predictions['class_probs']) - pred_dict['class_name'] = params['class_names'][np.argmax(class_overall)] - for ii in range(predictions['det_probs'].shape[0]): - res = {} - res['start_time'] = round(float(predictions['start_times'][ii]), 4) - res['end_time'] = round(float(predictions['end_times'][ii]), 4) - res['low_freq'] = int(predictions['low_freqs'][ii]) - res['high_freq'] = int(predictions['high_freqs'][ii]) - res['class'] = str(params['class_names'][int(class_ind_best[ii])]) - res['class_prob'] = round(float(class_prob_best[ii]), 3) - res['det_prob'] = round(float(predictions['det_probs'][ii]), 3) - res['individual'] = '-1' - res['event'] = 'Echolocation' - pred_dict['annotation'].append(res) +def format_single_result( + file_id: str, + time_exp: float, + duration: float, + predictions: PredictionResults, + class_names: List[str], +) -> FileAnnotations: + """Format results into the format expected by the annotation tool. 
+ + Args: + file_id (str): File ID. + time_exp (float): Time expansion factor. + duration (float): Duration of audio file. + predictions (dict): Predictions. + class_names (List[str]): Names of the classes the model can detect. + + Returns: + dict: Results in the format expected by the annotation tool. + """ + # Get a single class prediction for the file + class_overall = pp.overall_class_pred( + predictions["det_probs"], + predictions["class_probs"], + ) + + return { + "id": file_id, + "annotated": False, + "issues": False, + "notes": "Automatically generated.", + "time_exp": time_exp, + "duration": round(float(duration), 4), + "annotation": get_annotations_from_preds(predictions, class_names), + "class_name": class_names[np.argmax(class_overall)], + } + + +def convert_results( + file_id: str, + time_exp: float, + duration: float, + params: ResultParams, + predictions, + spec_feats, + cnn_feats, + spec_slices, +) -> RunResults: + """Convert results to dictionary as expected by the annotation tool. + + Args: + file_id (str): File ID. + time_exp (float): Time expansion factor. + duration (float): Duration of audio file. + params (ResultParams): Result parameters. + predictions (dict): Predictions. + spec_feats (np.ndarray): Spectral features. + cnn_feats (np.ndarray): CNN features. + spec_slices (list): Spectrogram slices. + + Returns: + dict: Dictionary with results. + + """ + pred_dict = format_single_result( + file_id, + time_exp, + duration, + predictions, + params["class_names"], + ) # combine into final results dictionary - results = {} - results['pred_dict'] = pred_dict - if len(spec_feats) > 0: - results['spec_feats'] = spec_feats - results['spec_feat_names'] = feats.get_feature_names() - if len(cnn_feats) > 0: - results['cnn_feats'] = cnn_feats - results['cnn_feat_names'] = [str(ii) for ii in range(cnn_feats.shape[1])] - if len(spec_slices) > 0: - results['spec_slices'] = spec_slices + results: RunResults = { + "pred_dict": pred_dict, + } + + # add spectrogram features if they exist + if len(spec_feats) > 0 and params["spec_features"]: + results["spec_feats"] = spec_feats + results["spec_feat_names"] = feats.get_feature_names() + + # add CNN features if they exist + if len(cnn_feats) > 0 and params["cnn_features"]: + results["cnn_feats"] = cnn_feats + results["cnn_feat_names"] = [ + str(ii) for ii in range(cnn_feats.shape[1]) + ] + + # add spectrogram slices if they exist + if len(spec_slices) > 0 and params["spec_slices"]: + results["spec_slices"] = spec_slices return results -def save_results_to_file(results, op_path): +def save_results_to_file(results, op_path: str) -> None: + """Save results to file. + + Will create the output directory if it does not exist. + + Args: + results (dict): Results. + op_path (str): Output path, without extension. 
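+ + Example: + Illustrative; `results` as returned by `convert_results`, and the + output path is hypothetical: + + >>> save_results_to_file(results, "example_data/anns/recording") + + This writes `example_data/anns/recording.json` and, when there are + predictions, `example_data/anns/recording.csv` as well.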
+ + """ # make directory if it does not exist if not os.path.isdir(os.path.dirname(op_path)): os.makedirs(os.path.dirname(op_path)) # save csv file - if there are predictions - result_list = [res for res in results['pred_dict']['annotation']] - df = pd.DataFrame(result_list) - df['file_name'] = [results['pred_dict']['id']]*len(result_list) - df.index.name = 'id' - if 'class_prob' in df.columns: - df = df[['det_prob', 'start_time', 'end_time', 'high_freq', - 'low_freq', 'class', 'class_prob']] - df.to_csv(op_path + '.csv', sep=',') + result_list = results["pred_dict"]["annotation"] - # save features - if 'spec_feats' in results.keys(): - df = pd.DataFrame(results['spec_feats'], columns=results['spec_feat_names']) - df.to_csv(op_path + '_spec_features.csv', sep=',', index=False, float_format='%.5f') + results_df = pd.DataFrame(result_list) - if 'cnn_feats' in results.keys(): - df = pd.DataFrame(results['cnn_feats'], columns=results['cnn_feat_names']) - df.to_csv(op_path + '_cnn_features.csv', sep=',', index=False, float_format='%.5f') + # add file name as a column + results_df["file_name"] = results["pred_dict"]["id"] + + # rename index column + results_df.index.name = "id" + + # create a csv file with predicted events + if "class_prob" in results_df.columns: + preds_df = results_df[ + [ + "det_prob", + "start_time", + "end_time", + "high_freq", + "low_freq", + "class", + "class_prob", + ] + ] + preds_df.to_csv(op_path + ".csv", sep=",") + + if "spec_feats" in results.keys(): + # create csv file with spectrogram features + spec_feats_df = pd.DataFrame( + results["spec_feats"], + columns=results["spec_feat_names"], + ) + spec_feats_df.to_csv( + op_path + "_spec_features.csv", + sep=",", + index=False, + float_format="%.5f", + ) + + if "cnn_feats" in results.keys(): + # create csv file with cnn extracted features + cnn_feats_df = pd.DataFrame( + results["cnn_feats"], + columns=results["cnn_feat_names"], + ) + cnn_feats_df.to_csv( + op_path + "_cnn_features.csv", + sep=",", + index=False, + float_format="%.5f", + ) # save json file - with open(op_path + '.json', 'w') as da: - json.dump(results['pred_dict'], da, indent=2, sort_keys=True) + with open(op_path + ".json", "w", encoding="utf-8") as jsonfile: + json.dump(results["pred_dict"], jsonfile, indent=2, sort_keys=True) -def compute_spectrogram(audio, sampling_rate, params, return_np=False): +def compute_spectrogram( + audio: np.ndarray, + sampling_rate: int, + params: SpectrogramParameters, + device: torch.device, + return_np: bool = False, +) -> Tuple[float, torch.Tensor, Optional[np.ndarray]]: + """Compute a spectrogram from an audio array. + Will pad the audio array so that it is evenly divisible by the + downsampling factors. + + Parameters + ---------- + audio : np.ndarray + + sampling_rate : int + + params : SpectrogramParameters + The parameters to use for generating the spectrogram. + + return_np : bool, optional + Whether to return the spectrogram as a numpy array as well as a + torch tensor. The default is False. + + Returns + ------- + duration : float + The duration of the spectrgram in seconds. + + spec : torch.Tensor + The spectrogram as a torch tensor. + + spec_np : np.ndarray, optional + The spectrogram as a numpy array. Only returned if `return_np` is + True, otherwise None. 
+ """ # pad audio so it is evenly divisible by downsampling factors duration = audio.shape[0] / float(sampling_rate) - audio = au.pad_audio(audio, sampling_rate, params['fft_win_length'], - params['fft_overlap'], params['resize_factor'], - params['spec_divide_factor']) + audio = au.pad_audio( + audio, + sampling_rate, + params["fft_win_length"], + params["fft_overlap"], + params["resize_factor"], + params["spec_divide_factor"], + ) # generate spectrogram spec, _ = au.generate_spectrogram(audio, sampling_rate, params) # convert to pytorch - spec = torch.from_numpy(spec).to(params['device']) + spec = torch.from_numpy(spec).to(device) + + # add batch and channel dimensions spec = spec.unsqueeze(0).unsqueeze(0) # resize the spec - rs = params['resize_factor'] - spec_op_shape = (int(params['spec_height']*rs), int(spec.shape[-1]*rs)) - spec = F.interpolate(spec, size=spec_op_shape, mode='bilinear', align_corners=False) + resize_factor = params["resize_factor"] + spec_op_shape = ( + int(params["spec_height"] * resize_factor), + int(spec.shape[-1] * resize_factor), + ) + spec = F.interpolate( + spec, + size=spec_op_shape, + mode="bilinear", + align_corners=False, + ) if return_np: - spec_np = spec[0,0,:].cpu().data.numpy() + spec_np = spec[0, 0, :].cpu().data.numpy() else: spec_np = None return duration, spec, spec_np -def process_file(audio_file, model, params, args, time_exp=None, top_n=5, return_raw_preds=False, max_duration=False): +def iterate_over_chunks( + audio: np.ndarray, + samplerate: int, + chunk_size: float, +) -> Iterator[Tuple[float, np.ndarray]]: + """Iterate over audio in chunks of size chunk_size. + Parameters + ---------- + audio : np.ndarray + + samplerate : int + + chunk_size : float + Size of chunks in seconds. + + Yields + ------ + chunk_start : float + Start time of chunk in seconds. 
+ chunk : np.ndarray + + """ + nsamples = audio.shape[0] + duration_full = nsamples / samplerate + num_chunks = int(np.ceil(duration_full / chunk_size)) + for chunk_id in range(num_chunks): + chunk_start = chunk_size * chunk_id + chunk_length = int(samplerate * chunk_size) + start_sample = chunk_id * chunk_length + end_sample = np.minimum((chunk_id + 1) * chunk_length, nsamples) + yield chunk_start, audio[start_sample:end_sample] + + +def _process_spectrogram( + spec: torch.Tensor, + samplerate: int, + model: DetectionModel, + config: ProcessingConfiguration, +) -> Tuple[PredictionResults, List[np.ndarray]]: + # evaluate model + with torch.no_grad(): + outputs = model(spec) + + # run non-max suppression + pred_nms_list, features = pp.run_nms( + outputs, + { + "nms_kernel_size": config["nms_kernel_size"], + "max_freq": config["max_freq"], + "min_freq": config["min_freq"], + "fft_win_length": config["fft_win_length"], + "fft_overlap": config["fft_overlap"], + "resize_factor": config["resize_factor"], + "nms_top_k_per_sec": config["nms_top_k_per_sec"], + "detection_threshold": config["detection_threshold"], + }, + np.array([float(samplerate)]), + ) + + pred_nms = pred_nms_list[0] + + # if we have a background class + class_probs = pred_nms.get("class_probs") + if (class_probs is not None) and ( + class_probs.shape[0] > len(config["class_names"]) + ): + pred_nms["class_probs"] = class_probs[:-1, :] + + return pred_nms, features + + +def postprocess_model_outputs( + outputs: ModelOutput, + samp_rate: int, + config: ProcessingConfiguration, +) -> Tuple[List[Annotation], np.ndarray]: + # run non-max suppression + pred_nms_list, features = pp.run_nms( + outputs, + { + "nms_kernel_size": config["nms_kernel_size"], + "max_freq": config["max_freq"], + "min_freq": config["min_freq"], + "fft_win_length": config["fft_win_length"], + "fft_overlap": config["fft_overlap"], + "resize_factor": config["resize_factor"], + "nms_top_k_per_sec": config["nms_top_k_per_sec"], + "detection_threshold": config["detection_threshold"], + }, + np.array([float(samp_rate)]), + ) + + pred_nms = pred_nms_list[0] + + # if we have a background class + class_probs = pred_nms.get("class_probs") + if (class_probs is not None) and ( + class_probs.shape[0] > len(config["class_names"]) + ): + pred_nms["class_probs"] = class_probs[:-1, :] + + annotations = get_annotations_from_preds( + pred_nms, + config["class_names"], + ) + + return annotations, features[0] + + +def process_spectrogram( + spec: torch.Tensor, + samplerate: int, + model: DetectionModel, + config: ProcessingConfiguration, +) -> Tuple[List[Annotation], List[np.ndarray]]: + """Process a spectrogram with detection model. + + Will run non-maximum suppression on the output of the model. + + Parameters + ---------- + spec : torch.Tensor + + samplerate : int + + model : DetectionModel + Detection model. + + config : ProcessingConfiguration + Processing configuration, including the non-maximum suppression + parameters. + + Returns + ------- + annotations : List[Annotation] + List of annotations predicted by the model. + + features : List[np.ndarray] + List of CNN features associated with each annotation. + This list is empty if `config["cnn_features"]` is False. 
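+ + Examples + -------- + Illustrative only; assumes `spec` was produced by + `compute_spectrogram`, `model` by `load_model`, and `config` is a + complete `ProcessingConfiguration`: + + >>> annotations, features = process_spectrogram( + ...     spec, 256000, model, config + ... )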
+ """ + pred_nms, features = _process_spectrogram( + spec, + samplerate, + model, + config, + ) + + annotations = get_annotations_from_preds( + pred_nms, + config["class_names"], + ) + + return annotations, features + + +def _process_audio_array( + audio: np.ndarray, + sampling_rate: int, + model: DetectionModel, + config: ProcessingConfiguration, + device: torch.device, +) -> Tuple[PredictionResults, List[np.ndarray], torch.Tensor]: + # load audio file and compute spectrogram + _, spec, _ = compute_spectrogram( + audio, + sampling_rate, + { + "fft_win_length": config["fft_win_length"], + "fft_overlap": config["fft_overlap"], + "spec_height": config["spec_height"], + "resize_factor": config["resize_factor"], + "spec_divide_factor": config["spec_divide_factor"], + "max_freq": config["max_freq"], + "min_freq": config["min_freq"], + "spec_scale": config["spec_scale"], + "denoise_spec_avg": config["denoise_spec_avg"], + "max_scale_spec": config["max_scale_spec"], + }, + device, + return_np=False, + ) + + # process spectrogram with model + pred_nms, features = _process_spectrogram( + spec, + sampling_rate, + model, + config, + ) + + return pred_nms, features, spec + + +def process_audio_array( + audio: np.ndarray, + sampling_rate: int, + model: DetectionModel, + config: ProcessingConfiguration, + device: torch.device, +) -> Tuple[List[Annotation], List[np.ndarray], torch.Tensor]: + """Process a single audio array with detection model. + + Parameters + ---------- + audio : np.ndarray + + sampling_rate : int + + model : torch.nn.Module + Detection model. + + config : ProcessingConfiguration + Configuration for processing. + + device : torch.device + Device to use for processing. + + Returns + ------- + annotations : List[Annotation] + List of annotations predicted by the model. + + features : List[np.ndarray] + List of CNN features associated with each annotation. + + spec : torch.Tensor + Spectrogram of the audio used as input. + + """ + pred_nms, features, spec = _process_audio_array( + audio, + sampling_rate, + model, + config, + device, + ) + + annotations = get_annotations_from_preds( + pred_nms, + config["class_names"], + ) + + return annotations, features, spec + + +def process_file( + audio_file: str, + model: DetectionModel, + config: ProcessingConfiguration, + device: torch.device, +) -> Union[RunResults, Any]: + """Process a single audio file with detection model. + + Will split the audio file into chunks if it is too long and + process each chunk separately. + + Parameters + ---------- + audio_file : str + Path to audio file. + + model : torch.nn.Module + Detection model. + + config : ProcessingConfiguration + Configuration for processing. + + Returns + ------- + results : Results or Any + Results of processing audio file with the given detection model. 
+ Will be a list of raw predictions if `config["return_raw_preds"]` is + `True`, otherwise a `RunResults` dictionary. + """ # store temporary results here predictions = [] - spec_feats = [] - cnn_feats = [] + spec_feats = [] + cnn_feats = [] spec_slices = [] - # get time expansion factor - if time_exp is None: - time_exp = args['time_expansion_factor'] - - params['detection_threshold'] = args['detection_threshold'] - # load audio file - sampling_rate, audio_full = au.load_audio_file(audio_file, time_exp, - params['target_samp_rate'], params['scale_raw_audio']) - - # clipping maximum duration - if max_duration is not False: - max_duration = np.minimum(int(sampling_rate*max_duration), audio_full.shape[0]) - audio_full = audio_full[:max_duration] - - duration_full = audio_full.shape[0] / float(sampling_rate) - - return_np_spec = args['spec_features'] or args['spec_slices'] + sampling_rate, audio_full = au.load_audio( + audio_file, + time_exp_fact=config.get("time_expansion", 1) or 1, + target_samp_rate=config["target_samp_rate"], + scale=config["scale_raw_audio"], + max_duration=config.get("max_duration"), + ) # loop through larger file and split into chunks - # TODO fix so that it overlaps correctly and takes care of duplicate detections at borders - num_chunks = int(np.ceil(duration_full/args['chunk_size'])) - for chunk_id in range(num_chunks): + # TODO: fix so that it overlaps correctly and takes care of + # duplicate detections at borders + for chunk_time, audio in iterate_over_chunks( + audio_full, + sampling_rate, + config["chunk_size"], + ): + # Run detection model on chunk + pred_nms, features, spec_np = _process_audio_array( + audio, + sampling_rate, + model, + config, + device, + ) - # chunk - chunk_time = args['chunk_size']*chunk_id - chunk_length = int(sampling_rate*args['chunk_size']) - start_sample = chunk_id*chunk_length - end_sample = np.minimum((chunk_id+1)*chunk_length, audio_full.shape[0]) - audio = audio_full[start_sample:end_sample] - - # load audio file and compute spectrogram - duration, spec, spec_np = compute_spectrogram(audio, sampling_rate, params, return_np_spec) - - # evaluate model - with torch.no_grad(): - outputs = model(spec, return_feats=args['cnn_features']) - - # run non-max suppression - pred_nms, features = pp.run_nms(outputs, params, np.array([float(sampling_rate)])) - pred_nms = pred_nms[0] - pred_nms['start_times'] += chunk_time - pred_nms['end_times'] += chunk_time - - # if we have a background class - if pred_nms['class_probs'].shape[0] > len(params['class_names']): - pred_nms['class_probs'] = pred_nms['class_probs'][:-1, :] + # add chunk time to start and end times + pred_nms["start_times"] += chunk_time + pred_nms["end_times"] += chunk_time predictions.append(pred_nms) # extract features - if there are any calls detected - if (pred_nms['det_probs'].shape[0] > 0): - if args['spec_features']: - spec_feats.append(feats.get_feats(spec_np, pred_nms, params)) + if pred_nms["det_probs"].shape[0] == 0: + continue - if args['cnn_features']: - cnn_feats.append(features[0]) + if config["spec_features"]: + spec_feats.append(feats.get_feats(spec_np, pred_nms, config)) - if args['spec_slices']: - spec_slices.extend(feats.extract_spec_slices(spec_np, pred_nms, params)) + if config["cnn_features"]: + cnn_feats.append(features[0]) - # convert the predictions into output dictionary - file_id = os.path.basename(audio_file) - predictions, spec_feats, cnn_feats, spec_slices =\ - merge_results(predictions, spec_feats, cnn_feats, spec_slices) - results = convert_results(file_id, time_exp, duration_full, params, - 
predictions, spec_feats, cnn_feats, spec_slices) + if config["spec_slices"]: + spec_slices.extend( + feats.extract_spec_slices(spec_np, pred_nms, config) + ) + + # Merge results from chunks + predictions, spec_feats, cnn_feats, spec_slices = _merge_results( + predictions, + spec_feats, + cnn_feats, + spec_slices, + ) + + # convert results to a dictionary in the right format + results = convert_results( + file_id=os.path.basename(audio_file), + time_exp=config.get("time_expansion", 1) or 1, + duration=audio_full.shape[0] / float(sampling_rate), + params=config, + predictions=predictions, + spec_feats=spec_feats, + cnn_feats=cnn_feats, + spec_slices=spec_slices, + ) # summarize results - if not args['quiet']: - num_detections = len(results['pred_dict']['annotation']) - print('{}'.format(num_detections) + ' call(s) detected above the threshold.') + if not config["quiet"]: + summarize_results(results, predictions, config) + + if config["return_raw_preds"]: + return predictions + + return results + + +def summarize_results(results, predictions, config): + """Print summary of results.""" + num_detections = len(results["pred_dict"]["annotation"]) + print(f"{num_detections} call(s) detected above the threshold.") # print results for top n classes - if not args['quiet'] and (num_detections > 0): - class_overall = pp.overall_class_pred(predictions['det_probs'], predictions['class_probs']) - print('species name'.ljust(30) + 'probablity present') - for cc in np.argsort(class_overall)[::-1][:top_n]: - print(params['class_names'][cc].ljust(30) + str(round(class_overall[cc], 3))) + if num_detections > 0: + class_overall = pp.overall_class_pred( + predictions["det_probs"], + predictions["class_probs"], + ) + print("species name".ljust(30) + "probability present") - if return_raw_preds: - return predictions - else: - return results + for class_index in np.argsort(class_overall)[::-1][: config["top_n"]]: + print( + config["class_names"][class_index].ljust(30) + + str(round(class_overall[class_index], 3)) + ) diff --git a/bat_detect/utils/plot_utils.py b/bat_detect/utils/plot_utils.py index 5b38f65..6fcb387 100644 --- a/bat_detect/utils/plot_utils.py +++ b/bat_detect/utils/plot_utils.py @@ -1,63 +1,107 @@ -import numpy as np -import matplotlib.pyplot as plt import json -from sklearn.metrics import confusion_matrix + +import matplotlib.pyplot as plt +import numpy as np from matplotlib import patches from matplotlib.collections import PatchCollection - -from . 
import audio_utils as au +from sklearn.metrics import confusion_matrix -def create_box_image(spec, fig, detections_ip, start_time, end_time, duration, params, max_val, hide_axis=True, plot_class_names=False): +def create_box_image( + spec, + fig, + detections_ip, + start_time, + end_time, + duration, + params, + max_val, + hide_axis=True, + plot_class_names=False, +): # filter detections stop_time = start_time + duration detections = [] for bb in detections_ip: - if (bb['start_time'] >= start_time) and (bb['start_time'] < stop_time-0.02): #(bb['end_time'] < end_time): + if (bb["start_time"] >= start_time) and ( + bb["start_time"] < stop_time - 0.02 + ): # (bb['end_time'] < end_time): detections.append(bb) # create figure freq_scale = 1000 # turn Hz to kHz - min_freq = params['min_freq']//freq_scale - max_freq = params['max_freq']//freq_scale + min_freq = params["min_freq"] // freq_scale + max_freq = params["max_freq"] // freq_scale y_extent = [0, duration, min_freq, max_freq] if hide_axis: - ax = plt.Axes(fig, [0., 0., 1., 1.]) + ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0]) ax.set_axis_off() fig.add_axes(ax) else: ax = plt.gca() - plt.imshow(spec, aspect='auto', cmap='plasma', extent=y_extent, vmin=0, vmax=max_val) + plt.imshow( + spec, + aspect="auto", + cmap="plasma", + extent=y_extent, + vmin=0, + vmax=max_val, + ) boxes = plot_bounding_box_patch_ann(detections, freq_scale, start_time) ax.add_collection(PatchCollection(boxes, match_original=True)) plt.grid(False) if plot_class_names: for ii, bb in enumerate(boxes): - txt = ' '.join([sp[:3] for sp in detections_ip[ii]['class'].split(' ')]) - font_info = {'color': 'white', 'size': 10, 'weight': 'bold', 'alpha': bb.get_alpha()} + txt = " ".join( + [sp[:3] for sp in detections_ip[ii]["class"].split(" ")] + ) + font_info = { + "color": "white", + "size": 10, + "weight": "bold", + "alpha": bb.get_alpha(), + } y_pos = bb.get_xy()[1] + bb.get_height() if y_pos > (max_freq - 10): y_pos = max_freq - 10 plt.gca().text(bb.get_xy()[0], y_pos, txt, fontdict=font_info) -def save_ann_spec(op_path, spec, min_freq, max_freq, duration, start_time, title_text='', anns=None): +def save_ann_spec( + op_path, + spec, + min_freq, + max_freq, + duration, + start_time, + title_text="", + anns=None, +): # create figure and plot boxes freq_scale = 1000 # turn Hz to kHz - min_freq = min_freq//freq_scale - max_freq = max_freq//freq_scale + min_freq = min_freq // freq_scale + max_freq = max_freq // freq_scale y_extent = [0, duration, min_freq, max_freq] - plt.close('all') - fig = plt.figure(0, figsize=(spec.shape[1]/100, spec.shape[0]/100), dpi=100) - plt.imshow(spec, aspect='auto', cmap='plasma', extent=y_extent, vmin=0, vmax=spec.max()*1.1) + plt.close("all") + fig = plt.figure( + 0, figsize=(spec.shape[1] / 100, spec.shape[0] / 100), dpi=100 + ) + plt.imshow( + spec, + aspect="auto", + cmap="plasma", + extent=y_extent, + vmin=0, + vmax=spec.max() * 1.1, + ) - plt.ylabel('Freq - kHz') - plt.xlabel('Time - secs') - if title_text != '': + plt.ylabel("Freq - kHz") + plt.xlabel("Time - secs") + if title_text != "": plt.title(title_text) plt.tight_layout() @@ -66,122 +110,185 @@ def save_ann_spec(op_path, spec, min_freq, max_freq, duration, start_time, title boxes = plot_bounding_box_patch_ann(anns, freq_scale, start_time) plt.gca().add_collection(PatchCollection(boxes, match_original=True)) for ii, bb in enumerate(boxes): - txt = ' '.join([sp[:3] for sp in anns[ii]['class'].split(' ')]) - font_info = {'color': 'white', 'size': 10, 'weight': 'bold', 'alpha': bb.get_alpha()} 
+ txt = " ".join([sp[:3] for sp in anns[ii]["class"].split(" ")]) + font_info = { + "color": "white", + "size": 10, + "weight": "bold", + "alpha": bb.get_alpha(), + } y_pos = bb.get_xy()[1] + bb.get_height() if y_pos > (max_freq - 10): y_pos = max_freq - 10 plt.gca().text(bb.get_xy()[0], y_pos, txt, fontdict=font_info) - print('Saving figure to:', op_path) + print("Saving figure to:", op_path) plt.savefig(op_path) -def plot_pts(fig_id, feats, class_names, colors, marker_size=4.0, plot_legend=False): +def plot_pts( + fig_id, feats, class_names, colors, marker_size=4.0, plot_legend=False +): plt.figure(fig_id) un_class, labels = np.unique(class_names, return_inverse=True) un_labels = np.unique(labels) if un_labels.shape[0] > len(colors): - colors = [plt.cm.jet(float(ii)/un_labels.shape[0]) for ii in un_labels] + colors = [ + plt.cm.jet(float(ii) / un_labels.shape[0]) for ii in un_labels + ] for ii, u in enumerate(un_labels): - inds = np.where(labels==u)[0] - plt.scatter(feats[inds, 0], feats[inds, 1], c=colors[ii], label=str(un_class[ii]), s=marker_size) + inds = np.where(labels == u)[0] + plt.scatter( + feats[inds, 0], + feats[inds, 1], + c=colors[ii], + label=str(un_class[ii]), + s=marker_size, + ) if plot_legend: plt.legend() plt.xticks([]) plt.yticks([]) - plt.title('downsampled features') + plt.title("downsampled features") -def plot_bounding_box_patch(pred, freq_scale, ecolor='w'): +def plot_bounding_box_patch(pred, freq_scale, ecolor="w"): patch_collect = [] - for bb in range(len(pred['start_times'])): - xx = pred['start_times'][bb] - ww = pred['end_times'][bb] - pred['start_times'][bb] - yy = pred['low_freqs'][bb] / freq_scale - hh = (pred['high_freqs'][bb] - pred['low_freqs'][bb]) / freq_scale + for bb in range(len(pred["start_times"])): + xx = pred["start_times"][bb] + ww = pred["end_times"][bb] - pred["start_times"][bb] + yy = pred["low_freqs"][bb] / freq_scale + hh = (pred["high_freqs"][bb] - pred["low_freqs"][bb]) / freq_scale - if 'det_probs' in pred.keys(): - alpha_val = pred['det_probs'][bb] + if "det_probs" in pred.keys(): + alpha_val = pred["det_probs"][bb] else: alpha_val = 1.0 - patch_collect.append(patches.Rectangle((xx, yy), ww, hh, linewidth=1, - edgecolor=ecolor, facecolor='none', alpha=alpha_val)) + patch_collect.append( + patches.Rectangle( + (xx, yy), + ww, + hh, + linewidth=1, + edgecolor=ecolor, + facecolor="none", + alpha=alpha_val, + ) + ) return patch_collect def plot_bounding_box_patch_ann(anns, freq_scale, start_time): patch_collect = [] for aa in range(len(anns)): - xx = anns[aa]['start_time'] - start_time - ww = anns[aa]['end_time'] - anns[aa]['start_time'] - yy = anns[aa]['low_freq'] / freq_scale - hh = (anns[aa]['high_freq'] - anns[aa]['low_freq']) / freq_scale - if 'det_prob' in anns[aa]: - alpha = anns[aa]['det_prob'] + xx = anns[aa]["start_time"] - start_time + ww = anns[aa]["end_time"] - anns[aa]["start_time"] + yy = anns[aa]["low_freq"] / freq_scale + hh = (anns[aa]["high_freq"] - anns[aa]["low_freq"]) / freq_scale + if "det_prob" in anns[aa]: + alpha = anns[aa]["det_prob"] else: alpha = 1.0 - patch_collect.append(patches.Rectangle((xx,yy), ww, hh, linewidth=1, - edgecolor='w', facecolor='none', alpha=alpha)) + patch_collect.append( + patches.Rectangle( + (xx, yy), + ww, + hh, + linewidth=1, + edgecolor="w", + facecolor="none", + alpha=alpha, + ) + ) return patch_collect -def plot_spec(spec, sampling_rate, duration, gt, pred, params, plot_title, - op_file_name, pred_2d_hm, plot_boxes=True, fixed_aspect=True): +def plot_spec( + spec, + sampling_rate, 
+ duration, + gt, + pred, + params, + plot_title, + op_file_name, + pred_2d_hm, + plot_boxes=True, + fixed_aspect=True, +): if fixed_aspect: # ouptut image will be this width irrespective of the duration of the audio file width = 12 else: - width = 12*duration + width = 12 * duration fig = plt.figure(1, figsize=(width, 8)) - ax0 = plt.axes([0.05, 0.65, 0.9, 0.30]) # l b w h + ax0 = plt.axes([0.05, 0.65, 0.9, 0.30]) # l b w h ax1 = plt.axes([0.05, 0.33, 0.9, 0.30]) ax2 = plt.axes([0.05, 0.01, 0.9, 0.30]) freq_scale = 1000 # turn Hz in kHz - #duration = au.x_coords_to_time(spec.shape[1], sampling_rate, params['fft_win_length'], params['fft_overlap']) - y_extent = [0, duration, params['min_freq']//freq_scale, params['max_freq']//freq_scale] + # duration = au.x_coords_to_time(spec.shape[1], sampling_rate, params['fft_win_length'], params['fft_overlap']) + y_extent = [ + 0, + duration, + params["min_freq"] // freq_scale, + params["max_freq"] // freq_scale, + ] # plot gt boxes - ax0.imshow(spec, aspect='auto', cmap='plasma', extent=y_extent) + ax0.imshow(spec, aspect="auto", cmap="plasma", extent=y_extent) ax0.xaxis.set_ticklabels([]) - font_info = {'color': 'white', 'size': 12, 'weight': 'bold'} - ax0.text(0, params['min_freq']//freq_scale, 'Ground Truth', fontdict=font_info) + font_info = {"color": "white", "size": 12, "weight": "bold"} + ax0.text( + 0, params["min_freq"] // freq_scale, "Ground Truth", fontdict=font_info + ) plt.grid(False) if plot_boxes: boxes = plot_bounding_box_patch(gt, freq_scale) ax0.add_collection(PatchCollection(boxes, match_original=True)) for ii, bb in enumerate(boxes): - class_id = int(gt['class_ids'][ii]) + class_id = int(gt["class_ids"][ii]) if class_id < 0: - txt = params['generic_class'][0] + txt = params["generic_class"][0] else: - txt = params['class_names_short'][class_id] - font_info = {'color': 'white', 'size': 10, 'weight': 'bold', 'alpha': bb.get_alpha()} + txt = params["class_names_short"][class_id] + font_info = { + "color": "white", + "size": 10, + "weight": "bold", + "alpha": bb.get_alpha(), + } y_pos = bb.get_xy()[1] + bb.get_height() ax0.text(bb.get_xy()[0], y_pos, txt, fontdict=font_info) # plot predicted boxes - ax1.imshow(spec, aspect='auto', cmap='plasma', extent=y_extent) + ax1.imshow(spec, aspect="auto", cmap="plasma", extent=y_extent) ax1.xaxis.set_ticklabels([]) - font_info = {'color': 'white', 'size': 12, 'weight': 'bold'} - ax1.text(0, params['min_freq']//freq_scale, 'Prediction', fontdict=font_info) + font_info = {"color": "white", "size": 12, "weight": "bold"} + ax1.text( + 0, params["min_freq"] // freq_scale, "Prediction", fontdict=font_info + ) plt.grid(False) if plot_boxes: boxes = plot_bounding_box_patch(pred, freq_scale) ax1.add_collection(PatchCollection(boxes, match_original=True)) for ii, bb in enumerate(boxes): - if pred['class_probs'].shape[0] > len(params['class_names_short']): - class_id = pred['class_probs'][:-1, ii].argmax() + if pred["class_probs"].shape[0] > len(params["class_names_short"]): + class_id = pred["class_probs"][:-1, ii].argmax() else: - class_id = pred['class_probs'][:, ii].argmax() - txt = params['class_names_short'][class_id] - font_info = {'color': 'white', 'size': 10, 'weight': 'bold', 'alpha': bb.get_alpha()} + class_id = pred["class_probs"][:, ii].argmax() + txt = params["class_names_short"][class_id] + font_info = { + "color": "white", + "size": 10, + "weight": "bold", + "alpha": bb.get_alpha(), + } y_pos = bb.get_xy()[1] + bb.get_height() ax1.text(bb.get_xy()[0], y_pos, txt, fontdict=font_info) @@ 
-190,10 +297,18 @@ def plot_spec(spec, sampling_rate, duration, gt, pred, params, plot_title, min_val = 0.0 if pred_2d_hm.min() > 0.0 else pred_2d_hm.min() max_val = 1.0 if pred_2d_hm.max() < 1.0 else pred_2d_hm.max() - ax2.imshow(pred_2d_hm, aspect='auto', cmap='plasma', extent=y_extent, clim=[min_val, max_val]) - #ax2.xaxis.set_ticklabels([]) - font_info = {'color': 'white', 'size': 12, 'weight': 'bold'} - ax2.text(0, params['min_freq']//freq_scale, 'Heatmap', fontdict=font_info) + ax2.imshow( + pred_2d_hm, + aspect="auto", + cmap="plasma", + extent=y_extent, + clim=[min_val, max_val], + ) + # ax2.xaxis.set_ticklabels([]) + font_info = {"color": "white", "size": 12, "weight": "bold"} + ax2.text( + 0, params["min_freq"] // freq_scale, "Heatmap", fontdict=font_info + ) plt.grid(False) @@ -204,107 +319,149 @@ def plot_spec(spec, sampling_rate, duration, gt, pred, params, plot_title, plt.close(1) -def plot_pr_curve(op_dir, plt_title, file_name, results, file_type='png', title_text=''): - precision = results['precision'] - recall = results['recall'] - avg_prec = results['avg_prec'] +def plot_pr_curve( + op_dir, plt_title, file_name, results, file_type="png", title_text="" +): + precision = results["precision"] + recall = results["recall"] + avg_prec = results["avg_prec"] - plt.figure(0, figsize=(10,8)) + plt.figure(0, figsize=(10, 8)) plt.plot(recall, precision) - plt.ylabel('Precision', fontsize=20) - plt.xlabel('Recall', fontsize=20) - if title_text != '': - plt.title(title_text, fontdict={'fontsize': 28}) + plt.ylabel("Precision", fontsize=20) + plt.xlabel("Recall", fontsize=20) + if title_text != "": + plt.title(title_text, fontdict={"fontsize": 28}) else: - plt.title(plt_title + ' {:.3f}\n'.format(avg_prec)) - plt.xlim(0,1.02) - plt.ylim(0,1.02) + plt.title(plt_title + " {:.3f}\n".format(avg_prec)) + plt.xlim(0, 1.02) + plt.ylim(0, 1.02) plt.grid(True) plt.tight_layout() - plt.savefig(op_dir + file_name + '.' + file_type) + plt.savefig(op_dir + file_name + "." 
+ file_type) plt.close(0) -def plot_pr_curve_class(op_dir, plt_title, file_name, results, file_type='png', title_text=''): - plt.figure(0, figsize=(10,8)) - plt.ylabel('Precision', fontsize=20) - plt.xlabel('Recall', fontsize=20) - plt.xlim(0,1.02) - plt.ylim(0,1.02) +def plot_pr_curve_class( + op_dir, plt_title, file_name, results, file_type="png", title_text="" +): + plt.figure(0, figsize=(10, 8)) + plt.ylabel("Precision", fontsize=20) + plt.xlabel("Recall", fontsize=20) + plt.xlim(0, 1.02) + plt.ylim(0, 1.02) plt.grid(True) - linestyles = ['-', ':', '--'] - markers = ['o', 'v', '>', '^', '<', 's', 'P', 'X', '*'] - colors = plt.rcParams['axes.prop_cycle'].by_key()['color'] + linestyles = ["-", ":", "--"] + markers = ["o", "v", ">", "^", "<", "s", "P", "X", "*"] + colors = plt.rcParams["axes.prop_cycle"].by_key()["color"] # plot the PR curves - for ii, rr in enumerate(results['class_pr']): - class_name = ' '.join([sp[:3] for sp in rr['name'].split(' ')]) - cur_color = colors[int(ii%10)] - plt.plot(rr['recall'], rr['precision'], label=class_name, color=cur_color, - linestyle=linestyles[int(ii//10)], lw=2.5) + for ii, rr in enumerate(results["class_pr"]): + class_name = " ".join([sp[:3] for sp in rr["name"].split(" ")]) + cur_color = colors[int(ii % 10)] + plt.plot( + rr["recall"], + rr["precision"], + label=class_name, + color=cur_color, + linestyle=linestyles[int(ii // 10)], + lw=2.5, + ) - #print(class_name) + # print(class_name) # plot the location of the confidence threshold values - for jj, tt in enumerate(rr['thresholds']): - ind = rr['thresholds_inds'][jj] + for jj, tt in enumerate(rr["thresholds"]): + ind = rr["thresholds_inds"][jj] if ind > -1: - plt.plot(rr['recall'][ind], rr['precision'][ind], markers[jj], - color=cur_color, ms=10) - #print(np.round(tt,2), np.round(rr['recall'][ind],3), np.round(rr['precision'][ind],3)) + plt.plot( + rr["recall"][ind], + rr["precision"][ind], + markers[jj], + color=cur_color, + ms=10, + ) + # print(np.round(tt,2), np.round(rr['recall'][ind],3), np.round(rr['precision'][ind],3)) - if title_text != '': - plt.title(title_text, fontdict={'fontsize': 28}) + if title_text != "": + plt.title(title_text, fontdict={"fontsize": 28}) else: - plt.title(plt_title + ' {:.3f}\n'.format(results['avg_prec_class'])) - plt.legend(loc='lower left', prop={'size': 14}) + plt.title(plt_title + " {:.3f}\n".format(results["avg_prec_class"])) + plt.legend(loc="lower left", prop={"size": 14}) plt.tight_layout() - plt.savefig(op_dir + file_name + '.' + file_type) + plt.savefig(op_dir + file_name + "." 
+ file_type)
     plt.close(0)


-def plot_confusion_matrix(op_dir, op_file, gt, pred, file_acc, class_names_long, verbose=False, file_type='png', title_text=''):
+def plot_confusion_matrix(
+    op_dir,
+    op_file,
+    gt,
+    pred,
+    file_acc,
+    class_names_long,
+    verbose=False,
+    file_type="png",
+    title_text="",
+):
     # shorten the class names for plotting
     class_names = []
     for cc in class_names_long:
-        class_name_sm = ''.join([cc_sm[:3] + ' ' for cc_sm in cc.split(' ')])[:-1]
+        class_name_sm = "".join([cc_sm[:3] + " " for cc_sm in cc.split(" ")])[
+            :-1
+        ]
         class_names.append(class_name_sm)

     num_classes = len(class_names)
-    cm = confusion_matrix(gt, pred, labels=np.arange(num_classes)).astype(np.float32)
+    cm = confusion_matrix(gt, pred, labels=np.arange(num_classes)).astype(
+        np.float32
+    )
     cm_norm = cm.sum(1)
     valid_inds = np.where(cm_norm > 0)[0]
     cm[valid_inds, :] = cm[valid_inds, :] / cm_norm[valid_inds][..., np.newaxis]
-    cm[np.where(cm_norm ==- 0)[0], :] = np.nan
+    cm[np.where(cm_norm == 0)[0], :] = np.nan

     if verbose:
-        print('Per class accuracy:')
+        print("Per class accuracy:")
         str_len = np.max([len(cc) for cc in class_names_long]) + 5
         accs = np.diag(cm)
         for ii, cc in enumerate(class_names_long):
             if np.isnan(accs[ii]):
                 print(str(ii).ljust(5) + cc.ljust(str_len))
             else:
-                print(str(ii).ljust(5) + cc.ljust(str_len) + '{:.2f}'.format(accs[ii]*100))
+                print(
+                    str(ii).ljust(5)
+                    + cc.ljust(str_len)
+                    + "{:.2f}".format(accs[ii] * 100)
+                )

-    plt.figure(0, figsize=(10,8))
-    plt.imshow(cm, vmin=0, vmax=1, cmap='plasma')
+    plt.figure(0, figsize=(10, 8))
+    plt.imshow(cm, vmin=0, vmax=1, cmap="plasma")
     plt.colorbar()
-    plt.xticks(np.arange(cm.shape[1]), class_names, rotation='vertical')
+    plt.xticks(np.arange(cm.shape[1]), class_names, rotation="vertical")
     plt.yticks(np.arange(cm.shape[0]), class_names)
-    plt.xlabel('Predicted', fontsize=20)
-    plt.ylabel('Ground Truth', fontsize=20)
-    if title_text != '':
-        plt.title(title_text, fontdict={'fontsize': 28})
+    plt.xlabel("Predicted", fontsize=20)
+    plt.ylabel("Ground Truth", fontsize=20)
+    if title_text != "":
+        plt.title(title_text, fontdict={"fontsize": 28})
     else:
-        plt.title(op_file + ' {:.3f}\n'.format(file_acc))
+        plt.title(op_file + " {:.3f}\n".format(file_acc))
     plt.tight_layout()
-    plt.savefig(op_dir + op_file + '.' + file_type)
-    plt.close('all')
+    plt.savefig(op_dir + op_file + "." 
+ file_type) + plt.close("all") class LossPlotter(object): - def __init__(self, op_file_name, duration, labels, ylim, class_names, axis_labels=None, logy=False): + def __init__( + self, + op_file_name, + duration, + labels, + ylim, + class_names, + axis_labels=None, + logy=False, + ): self.reset() self.op_file_name = op_file_name self.duration = duration # length of x axis @@ -327,11 +484,16 @@ class LossPlotter(object): self.save_confusion_matrix(gt, pred) def save_plot(self): - linestyles = ['-', ':', '--'] - plt.figure(0, figsize=(8,5)) + linestyles = ["-", ":", "--"] + plt.figure(0, figsize=(8, 5)) for ii in range(len(self.vals[0])): l_vals = [vv[ii] for vv in self.vals] - plt.plot(self.epochs, l_vals, label=self.labels[ii], linestyle=linestyles[int(ii//10)]) + plt.plot( + self.epochs, + l_vals, + label=self.labels[ii], + linestyle=linestyles[int(ii // 10)], + ) plt.xlim(0, np.maximum(self.duration, len(self.vals))) if self.ylim is not None: plt.ylim(self.ylim[0], self.ylim[1]) @@ -339,33 +501,41 @@ class LossPlotter(object): plt.xlabel(self.axis_labels[0]) plt.ylabel(self.axis_labels[1]) if self.logy: - plt.gca().set_yscale('log') + plt.gca().set_yscale("log") plt.grid(True) - plt.legend(bbox_to_anchor=(1.01, 1), loc='upper left', borderaxespad=0.0) + plt.legend( + bbox_to_anchor=(1.01, 1), loc="upper left", borderaxespad=0.0 + ) plt.tight_layout() plt.savefig(self.op_file_name) plt.close(0) def save_json(self): data = {} - data['epochs'] = self.epochs + data["epochs"] = self.epochs for ii in range(len(self.vals[0])): - data[self.labels[ii]] = [round(vv[ii],4) for vv in self.vals] - with open(self.op_file_name[:-4] + '.json', 'w') as da: + data[self.labels[ii]] = [round(vv[ii], 4) for vv in self.vals] + with open(self.op_file_name[:-4] + ".json", "w") as da: json.dump(data, da, indent=2) def save_confusion_matrix(self, gt, pred): plt.figure(0) - cm = confusion_matrix(gt, pred, np.arange(len(self.class_names))).astype(np.float32) + cm = confusion_matrix( + gt, pred, labels=np.arange(len(self.class_names)) + ).astype(np.float32) cm_norm = cm.sum(1) valid_inds = np.where(cm_norm > 0)[0] - cm[valid_inds, :] = cm[valid_inds, :] / cm_norm[valid_inds][..., np.newaxis] - plt.imshow(cm, vmin=0, vmax=1, cmap='plasma') + cm[valid_inds, :] = ( + cm[valid_inds, :] / cm_norm[valid_inds][..., np.newaxis] + ) + plt.imshow(cm, vmin=0, vmax=1, cmap="plasma") plt.colorbar() - plt.xticks(np.arange(cm.shape[1]), self.class_names, rotation='vertical') + plt.xticks( + np.arange(cm.shape[1]), self.class_names, rotation="vertical" + ) plt.yticks(np.arange(cm.shape[0]), self.class_names) - plt.xlabel('Predicted') - plt.ylabel('Ground Truth') + plt.xlabel("Predicted") + plt.ylabel("Ground Truth") plt.tight_layout() - plt.savefig(self.op_file_name[:-4] + '_cm.png') + plt.savefig(self.op_file_name[:-4] + "_cm.png") plt.close(0) diff --git a/bat_detect/utils/visualize.py b/bat_detect/utils/visualize.py index bea7f6b..d79f322 100644 --- a/bat_detect/utils/visualize.py +++ b/bat_detect/utils/visualize.py @@ -1,19 +1,46 @@ -import numpy as np import matplotlib.pyplot as plt +import numpy as np from matplotlib import patches -from sklearn.svm import LinearSVC from matplotlib.axes._axes import _log as matplotlib_axes_logger -matplotlib_axes_logger.setLevel('ERROR') +from sklearn.svm import LinearSVC + +matplotlib_axes_logger.setLevel("ERROR") -colors = ['#e6194B', '#3cb44b', '#ffe119', '#4363d8', '#f58231', '#911eb4', - '#42d4f4', '#f032e6', '#bfef45', '#fabebe', '#469990', '#e6beff', - '#9A6324', '#fffac8', '#800000', 
'#aaffc3', '#808000', '#ffd8b1',
-          '#000075', '#a9a9a9']
+colors = [
+    "#e6194B",
+    "#3cb44b",
+    "#ffe119",
+    "#4363d8",
+    "#f58231",
+    "#911eb4",
+    "#42d4f4",
+    "#f032e6",
+    "#bfef45",
+    "#fabebe",
+    "#469990",
+    "#e6beff",
+    "#9A6324",
+    "#fffac8",
+    "#800000",
+    "#aaffc3",
+    "#808000",
+    "#ffd8b1",
+    "#000075",
+    "#a9a9a9",
+]


 class InteractivePlotter:
-    def __init__(self, feats_ds, feats, spec_slices, call_info, freq_lims, allow_training):
+    def __init__(
+        self,
+        feats_ds,
+        feats,
+        spec_slices,
+        call_info,
+        freq_lims,
+        allow_training,
+    ):
         """
         Plots 2D low-dimensional features on left and corresponding
         spectrograms on the right.

@@ -24,78 +51,123 @@ class InteractivePlotter:
         self.spec_slices = spec_slices
         self.call_info = call_info

-        #_, self.labels = np.unique([cc['class'] for cc in call_info], return_inverse=True)
+        # _, self.labels = np.unique([cc['class'] for cc in call_info], return_inverse=True)
         self.labels = np.zeros(len(call_info), dtype=np.int)
-        self.annotated = np.zeros(self.labels.shape[0], dtype=np.int) # can populate this with 1's where we have labels
-        self.labels_cols = [colors[self.labels[ii]] for ii in range(len(self.labels))]
+        self.annotated = np.zeros(
+            self.labels.shape[0], dtype=np.int
+        )  # can populate this with 1's where we have labels
+        self.labels_cols = [
+            colors[self.labels[ii]] for ii in range(len(self.labels))
+        ]

         self.freq_lims = freq_lims
         self.allow_training = allow_training
         self.pt_size = 5.0
-        self.spec_pad = 0.2 # this much padding has been applied to the spec slices
+        self.spec_pad = (
+            0.2  # this much padding has been applied to the spec slices
+        )
         self.fig_width = 12
         self.fig_height = 8

         self.current_id = 0
         max_ind = np.argmax([ss.shape[1] for ss in self.spec_slices])
         self.max_width = self.spec_slices[max_ind].shape[1]
-        self.blank_spec = np.zeros((self.spec_slices[0].shape[0], self.max_width))
-
+        self.blank_spec = np.zeros(
+            (self.spec_slices[0].shape[0], self.max_width)
+        )

     def plot(self, fig_id):
-        self.fig, self.ax = plt.subplots(nrows=1, ncols=2, num=fig_id, figsize=(self.fig_width, self.fig_height),
-                            gridspec_kw={'width_ratios': [2, 1]})
+        self.fig, self.ax = plt.subplots(
+            nrows=1,
+            ncols=2,
+            num=fig_id,
+            figsize=(self.fig_width, self.fig_height),
+            gridspec_kw={"width_ratios": [2, 1]},
+        )
         plt.tight_layout()

         # plot 2D TSNE features
-        self.low_dim_plt = self.ax[0].scatter(self.feats_ds[:, 0], self.feats_ds[:, 1],
-                            c=self.labels_cols, s=self.pt_size, picker=5)
-        self.ax[0].set_title('TSNE of Call Features')
+        self.low_dim_plt = self.ax[0].scatter(
+            self.feats_ds[:, 0],
+            self.feats_ds[:, 1],
+            c=self.labels_cols,
+            s=self.pt_size,
+            picker=5,
+        )
+        self.ax[0].set_title("TSNE of Call Features")
         self.ax[0].set_xticks([])
         self.ax[0].set_yticks([])

         # plot clip from spectrogram
-        spec_min_max = (0, self.blank_spec.shape[1], self.freq_lims[0], self.freq_lims[1])
-        self.ax[1].imshow(self.blank_spec, extent=spec_min_max, cmap='plasma', aspect='auto')
+        spec_min_max = (
+            0,
+            self.blank_spec.shape[1],
+            self.freq_lims[0],
+            self.freq_lims[1],
+        )
+        self.ax[1].imshow(
+            self.blank_spec, extent=spec_min_max, cmap="plasma", aspect="auto"
+        )
         self.spec_im = self.ax[1].get_images()[0]
-        self.ax[1].set_title('Spectrogram')
-        self.ax[1].grid(color='w', linewidth=0.5)
+        self.ax[1].set_title("Spectrogram")
+        self.ax[1].grid(color="w", linewidth=0.5)
         self.ax[1].set_xticks([])
-        self.ax[1].set_ylabel('kHz')
+        self.ax[1].set_ylabel("kHz")

-        bbox_orig = patches.Rectangle((0,0),0,0, edgecolor='w', linewidth=0, fill=False)
+        bbox_orig = 
patches.Rectangle( + (0, 0), 0, 0, edgecolor="w", linewidth=0, fill=False + ) self.ax[1].add_patch(bbox_orig) - self.annot = self.ax[0].annotate('', xy=(0,0), xytext=(20,20),textcoords='offset points', - bbox=dict(boxstyle='round', fc='w'), arrowprops=dict(arrowstyle='->')) + self.annot = self.ax[0].annotate( + "", + xy=(0, 0), + xytext=(20, 20), + textcoords="offset points", + bbox=dict(boxstyle="round", fc="w"), + arrowprops=dict(arrowstyle="->"), + ) self.annot.set_visible(False) - self.fig.canvas.mpl_connect('motion_notify_event', self.mouse_hover) - self.fig.canvas.mpl_connect('key_press_event', self.key_press) - + self.fig.canvas.mpl_connect("motion_notify_event", self.mouse_hover) + self.fig.canvas.mpl_connect("key_press_event", self.key_press) def mouse_hover(self, event): vis = self.annot.get_visible() if event.inaxes == self.ax[0]: cont, ind = self.low_dim_plt.contains(event) if cont: - self.current_id = ind['ind'][0] + self.current_id = ind["ind"][0] # copy spec into full window - probably a better way of doing this new_spec = self.blank_spec.copy() - w_diff = (self.blank_spec.shape[1] - self.spec_slices[self.current_id].shape[1])//2 - new_spec[:, w_diff:self.spec_slices[self.current_id].shape[1]+w_diff] = self.spec_slices[self.current_id] + w_diff = ( + self.blank_spec.shape[1] + - self.spec_slices[self.current_id].shape[1] + ) // 2 + new_spec[ + :, + w_diff : self.spec_slices[self.current_id].shape[1] + + w_diff, + ] = self.spec_slices[self.current_id] self.spec_im.set_data(new_spec) self.spec_im.set_clim(vmin=0, vmax=new_spec.max()) # draw bounding box around call self.ax[1].patches[0].remove() - spec_width_orig = self.spec_slices[self.current_id].shape[1]/(1.0+2.0*self.spec_pad) - xx = w_diff + self.spec_pad*spec_width_orig + spec_width_orig = self.spec_slices[self.current_id].shape[1] / ( + 1.0 + 2.0 * self.spec_pad + ) + xx = w_diff + self.spec_pad * spec_width_orig ww = spec_width_orig - yy = self.call_info[self.current_id]['low_freq']/1000 - hh = (self.call_info[self.current_id]['high_freq']-self.call_info[self.current_id]['low_freq'])/1000 - bbox = patches.Rectangle((xx,yy),ww,hh, edgecolor='r', linewidth=0.5, fill=False) + yy = self.call_info[self.current_id]["low_freq"] / 1000 + hh = ( + self.call_info[self.current_id]["high_freq"] + - self.call_info[self.current_id]["low_freq"] + ) / 1000 + bbox = patches.Rectangle( + (xx, yy), ww, hh, edgecolor="r", linewidth=0.5, fill=False + ) self.ax[1].add_patch(bbox) # update annotation arrow @@ -104,38 +176,52 @@ class InteractivePlotter: self.annot.set_visible(True) # write call info - info_str = self.call_info[self.current_id]['file_name'] + ', time=' \ - + str(round(self.call_info[self.current_id]['start_time'],3)) \ - + ', prob=' + str(round(self.call_info[self.current_id]['det_prob'],3)) + info_str = ( + self.call_info[self.current_id]["file_name"] + + ", time=" + + str( + round(self.call_info[self.current_id]["start_time"], 3) + ) + + ", prob=" + + str(round(self.call_info[self.current_id]["det_prob"], 3)) + ) self.ax[0].set_xlabel(info_str) # redraw self.fig.canvas.draw_idle() - def key_press(self, event): if event.key.isdigit(): self.labels_cols[self.current_id] = colors[int(event.key)] self.labels[self.current_id] = int(event.key) self.annotated[self.current_id] = 1 - elif event.key == 'enter' and self.allow_training: + elif event.key == "enter" and self.allow_training: self.train_classifier() - elif event.key == 'x' and self.allow_training: + elif event.key == "x" and self.allow_training: self.get_classifier_params() 
-        self.ax[0].scatter(self.feats_ds[:, 0], self.feats_ds[:, 1],
-                            c=self.labels_cols, s=self.pt_size)
+        self.ax[0].scatter(
+            self.feats_ds[:, 0],
+            self.feats_ds[:, 1],
+            c=self.labels_cols,
+            s=self.pt_size,
+        )
         self.fig.canvas.draw_idle()

-
     def train_classifier(self):
         # TODO maybe it's better to classify in 2D space - but then can't be linear ...
         inds = np.where(self.annotated == 1)[0]
         labs_un, labs_inds = np.unique(self.labels[inds], return_inverse=True)

         if labs_un.shape[0] > 1:  # needs at least 2 classes
-            self.clf = LinearSVC(C=1.0, penalty='l2', loss='squared_hinge', tol=0.0001,
-                     intercept_scaling=1.0, max_iter=2000)
+            self.clf = LinearSVC(
+                C=1.0,
+                penalty="l2",
+                loss="squared_hinge",
+                tol=0.0001,
+                intercept_scaling=1.0,
+                max_iter=2000,
+            )

             self.clf.fit(self.feats[inds, :], self.labels[inds])

@@ -145,14 +231,13 @@ class InteractivePlotter:
             for ii in inds_unlab:
                 self.labels_cols[ii] = colors[self.labels[ii]]
         else:
-            print('Not enough data - please label more classes.')
-
+            print("Not enough data - please label more classes.")

     def get_classifier_params(self):
         res = {}
         if self.clf is None:
-            print('Model not trained!')
+            print("Model not trained!")
         else:
-            res['weights'] = self.clf.coef_.astype(np.float32)
-            res['biases'] = self.clf.intercept_.astype(np.float32)
+            res["weights"] = self.clf.coef_.astype(np.float32)
+            res["biases"] = self.clf.intercept_.astype(np.float32)
         return res
diff --git a/bat_detect/utils/wavfile.py b/bat_detect/utils/wavfile.py
index a6715b0..7fee660 100644
--- a/bat_detect/utils/wavfile.py
+++ b/bat_detect/utils/wavfile.py
@@ -8,23 +8,25 @@ Functions
 `write`: Write a numpy array as a WAV file.

 """
-from __future__ import division, print_function, absolute_import
+from __future__ import absolute_import, division, print_function

-import sys
-import numpy
-import struct
-import warnings
 import os
+import struct
+import sys
+import warnings
+
+import numpy


 class WavFileWarning(UserWarning):
     pass


+
 _big_endian = False

 WAVE_FORMAT_PCM = 0x0001
 WAVE_FORMAT_IEEE_FLOAT = 0x0003
-WAVE_FORMAT_EXTENSIBLE = 0xfffe
+WAVE_FORMAT_EXTENSIBLE = 0xFFFE
 KNOWN_WAVE_FORMATS = (WAVE_FORMAT_PCM, WAVE_FORMAT_IEEE_FLOAT)

 # assumes file pointer is immediately
@@ -33,10 +35,10 @@ KNOWN_WAVE_FORMATS = (WAVE_FORMAT_PCM, WAVE_FORMAT_IEEE_FLOAT)

 def _read_fmt_chunk(fid):
     if _big_endian:
-        fmt = '>'
+        fmt = ">"
     else:
-        fmt = '<'
-    res = struct.unpack(fmt+'iHHIIHH',fid.read(20))
+        fmt = "<"
+    res = struct.unpack(fmt + "iHHIIHH", fid.read(20))
     size, comp, noc, rate, sbytes, ba, bits = res
     if comp not in KNOWN_WAVE_FORMATS or size > 16:
         comp = WAVE_FORMAT_PCM
@@ -51,41 +53,42 @@ def _read_fmt_chunk(fid):
 # after the 'data' id
 def _read_data_chunk(fid, comp, noc, bits, mmap=False):
     if _big_endian:
-        fmt = '>i'
+        fmt = ">i"
     else:
-        fmt = '<i'
-    size = struct.unpack(fmt,fid.read(4))[0]
+        fmt = "<i"
+    size = struct.unpack(fmt, fid.read(4))[0]

     bytes = bits // 8
     if bits == 8:
-        dtype = 'u1'
+        dtype = "u1"
     else:
         if _big_endian:
-            dtype = '>'
+            dtype = ">"
         else:
-            dtype = '<'
+            dtype = "<"
         if comp == 1:
-            dtype += 'i%d' % bytes
+            dtype += "i%d" % bytes
         else:
-            dtype += 'f%d' % bytes
+            dtype += "f%d" % bytes
     if not mmap:
         data = numpy.fromstring(fid.read(size), dtype=dtype)
     else:
         start = fid.tell()
-        data = numpy.memmap(fid, dtype=dtype, mode='c', offset=start,
-                            shape=(size//bytes,))
+        data = numpy.memmap(
+            fid, dtype=dtype, mode="c", offset=start, shape=(size // bytes,)
+        )
         fid.seek(start + size)

     if noc > 1:
-        data = data.reshape(-1,noc)
+        data = data.reshape(-1, noc)
     return data


 def _skip_unknown_chunk(fid):
     if _big_endian:
-        fmt = '>i'
+        fmt = ">i"
     else:
-        fmt = '<i'
+        fmt = "<i"

     data = fid.read(4)
     size = struct.unpack(fmt, data)[0]
     fid.seek(size, 1)
@@ ... @@ def write(filename, rate, data):
-    fid.write(b'data')
-    fid.write(struct.pack('<i', data.nbytes))
-    if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and sys.byteorder == 'big'):
+    fid.write(b"data")
+    fid.write(struct.pack("<i", data.nbytes))
+    if data.dtype.byteorder == ">" or (
+        data.dtype.byteorder == "=" and sys.byteorder == "big"
+    ):
         data = data.byteswap()
     _array_tofile(fid, data)

@@ -273,19 +286,22 @@ def write(filename, rate, data):
     # position at start of the file (replacing the 4 bytes of zeros)
     size = fid.tell()
     fid.seek(4)
-    fid.write(struct.pack('<i', size-8))
+    fid.write(struct.pack("<i", size - 8))

     fid.close()


 if sys.version_info[0] >= 3:
+
     def _array_tofile(fid, data):
         # ravel gives a c-contiguous buffer
-        fid.write(data.ravel().view('b').data)
+        fid.write(data.ravel().view("b").data)
+
 else:
+
     def _array_tofile(fid, data):
fid.write(data.tostring()) diff --git a/batdetect2_notebook.ipynb b/batdetect2_notebook.ipynb index 035affd..d2b8c56 100644 --- a/batdetect2_notebook.ipynb +++ b/batdetect2_notebook.ipynb @@ -56,9 +56,9 @@ "source": [ "# setup the arguments\n", "args = du.get_default_bd_args()\n", - "args['detection_threshold'] = 0.3\n", - "args['time_expansion_factor'] = 1\n", - "args['model_path'] = 'models/Net2DFast_UK_same.pth.tar'\n", + "args[\"detection_threshold\"] = 0.3\n", + "args[\"time_expansion_factor\"] = 1\n", + "args[\"model_path\"] = \"models/Net2DFast_UK_same.pth.tar\"\n", "max_duration = 2.0" ] }, @@ -69,7 +69,7 @@ "outputs": [], "source": [ "# load the model\n", - "model, params = du.load_model(args['model_path'])" + "model, params = du.load_model(args[\"model_path\"])" ] }, { @@ -86,13 +86,13 @@ "outputs": [], "source": [ "# choose an audio file\n", - "audio_file = 'example_data/audio/20170701_213954-MYOMYS-LR_0_0.5.wav'\n", + "audio_file = \"example_data/audio/20170701_213954-MYOMYS-LR_0_0.5.wav\"\n", "\n", "# the following lines are only needed in Colab\n", "# alternatively you can upload your own file\n", - "#from google.colab import files\n", - "#uploaded = files.upload()\n", - "#audio_file = list(uploaded.keys())[0]" + "# from google.colab import files\n", + "# uploaded = files.upload()\n", + "# audio_file = list(uploaded.keys())[0]" ] }, { @@ -102,7 +102,9 @@ "outputs": [], "source": [ "# run the model\n", - "results = du.process_file(audio_file, model, params, args, max_duration=max_duration)" + "results = du.process_file(\n", + " audio_file, model, params, args, max_duration=max_duration\n", + ")" ] }, { @@ -144,13 +146,17 @@ } ], "source": [ - "# print summary info for the individual detections \n", - "print('Results for ' + results['pred_dict']['id'])\n", - "print('{} calls detected\\n'.format(len(results['pred_dict']['annotation'])))\n", + "# print summary info for the individual detections\n", + "print(\"Results for \" + results[\"pred_dict\"][\"id\"])\n", + "print(\"{} calls detected\\n\".format(len(results[\"pred_dict\"][\"annotation\"])))\n", "\n", - "print('time\\tprob\\tlfreq\\tspecies_name')\n", - "for ann in results['pred_dict']['annotation']:\n", - " print('{}\\t{}\\t{}\\t{}'.format(ann['start_time'], ann['class_prob'], ann['low_freq'], ann['class']))" + "print(\"time\\tprob\\tlfreq\\tspecies_name\")\n", + "for ann in results[\"pred_dict\"][\"annotation\"]:\n", + " print(\n", + " \"{}\\t{}\\t{}\\t{}\".format(\n", + " ann[\"start_time\"], ann[\"class_prob\"], ann[\"low_freq\"], ann[\"class\"]\n", + " )\n", + " )" ] }, { @@ -174,10 +180,16 @@ } ], "source": [ - "# read the audio file \n", - "sampling_rate, audio = au.load_audio_file(audio_file, args['time_expansion_factor'], params['target_samp_rate'], params['scale_raw_audio'], max_duration=max_duration)\n", + "# read the audio file\n", + "sampling_rate, audio = au.load_audio_file(\n", + " audio_file,\n", + " args[\"time_expansion_factor\"],\n", + " params[\"target_samp_rate\"],\n", + " params[\"scale_raw_audio\"],\n", + " max_duration=max_duration,\n", + ")\n", "duration = audio.shape[0] / sampling_rate\n", - "print('File duration: {} seconds'.format(duration))" + "print(\"File duration: {} seconds\".format(duration))" ] }, { @@ -187,7 +199,9 @@ "outputs": [], "source": [ "# generate spectrogram for visualization\n", - "spec, spec_viz = au.generate_spectrogram(audio, sampling_rate, params, True, False)" + "spec, spec_viz = au.generate_spectrogram(\n", + " audio, sampling_rate, params, True, False\n", + ")" ] }, { @@ 
-210,12 +224,33 @@ "# display the detections on top of the spectrogram\n", "# note, if the audio file is very long, this image will be very large - best to crop the audio first\n", "start_time = 0.0\n", - "detections = [ann for ann in results['pred_dict']['annotation']]\n", - "fig = plt.figure(1, figsize=(spec.shape[1]/100, spec.shape[0]/100), dpi=100, frameon=False)\n", - "spec_duration = au.x_coords_to_time(spec.shape[1], sampling_rate, params['fft_win_length'], params['fft_overlap'])\n", - "viz.create_box_image(spec, fig, detections, start_time, start_time+spec_duration, spec_duration, params, spec.max()*1.1, False, True)\n", - "plt.ylabel('Freq - kHz')\n", - "plt.xlabel('Time - secs')\n", + "detections = [ann for ann in results[\"pred_dict\"][\"annotation\"]]\n", + "fig = plt.figure(\n", + " 1,\n", + " figsize=(spec.shape[1] / 100, spec.shape[0] / 100),\n", + " dpi=100,\n", + " frameon=False,\n", + ")\n", + "spec_duration = au.x_coords_to_time(\n", + " spec.shape[1],\n", + " sampling_rate,\n", + " params[\"fft_win_length\"],\n", + " params[\"fft_overlap\"],\n", + ")\n", + "viz.create_box_image(\n", + " spec,\n", + " fig,\n", + " detections,\n", + " start_time,\n", + " start_time + spec_duration,\n", + " spec_duration,\n", + " params,\n", + " spec.max() * 1.1,\n", + " False,\n", + " True,\n", + ")\n", + "plt.ylabel(\"Freq - kHz\")\n", + "plt.xlabel(\"Time - secs\")\n", "plt.title(os.path.basename(audio_file))\n", "plt.show()" ] diff --git a/pdm.lock b/pdm.lock new file mode 100644 index 0000000..68480c5 --- /dev/null +++ b/pdm.lock @@ -0,0 +1,1337 @@ +# This file is @generated by PDM. +# It is not intended for manual editing. + +[[package]] +name = "appdirs" +version = "1.4.4" +summary = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." + +[[package]] +name = "attrs" +version = "22.2.0" +requires_python = ">=3.6" +summary = "Classes Without Boilerplate" + +[[package]] +name = "audioread" +version = "3.0.0" +requires_python = ">=3.6" +summary = "multi-library, cross-platform audio decoding" + +[[package]] +name = "certifi" +version = "2022.12.7" +requires_python = ">=3.6" +summary = "Python package for providing Mozilla's CA Bundle." + +[[package]] +name = "cffi" +version = "1.15.1" +summary = "Foreign Function Interface for Python calling C code." +dependencies = [ + "pycparser", +] + +[[package]] +name = "charset-normalizer" +version = "3.1.0" +requires_python = ">=3.7.0" +summary = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." + +[[package]] +name = "click" +version = "8.1.3" +requires_python = ">=3.7" +summary = "Composable command line interface toolkit" +dependencies = [ + "colorama; platform_system == \"Windows\"", +] + +[[package]] +name = "colorama" +version = "0.4.6" +requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +summary = "Cross-platform colored terminal text." 
+ +[[package]] +name = "contourpy" +version = "1.0.7" +requires_python = ">=3.8" +summary = "Python library for calculating contours of 2D quadrilateral grids" +dependencies = [ + "numpy>=1.16", +] + +[[package]] +name = "cycler" +version = "0.11.0" +requires_python = ">=3.6" +summary = "Composable style cycles" + +[[package]] +name = "decorator" +version = "5.1.1" +requires_python = ">=3.5" +summary = "Decorators for Humans" + +[[package]] +name = "exceptiongroup" +version = "1.1.1" +requires_python = ">=3.7" +summary = "Backport of PEP 654 (exception groups)" + +[[package]] +name = "fonttools" +version = "4.39.3" +requires_python = ">=3.8" +summary = "Tools to manipulate font files" + +[[package]] +name = "idna" +version = "3.4" +requires_python = ">=3.5" +summary = "Internationalized Domain Names in Applications (IDNA)" + +[[package]] +name = "importlib-metadata" +version = "6.1.0" +requires_python = ">=3.7" +summary = "Read metadata from Python packages" +dependencies = [ + "zipp>=0.5", +] + +[[package]] +name = "importlib-resources" +version = "5.12.0" +requires_python = ">=3.7" +summary = "Read resources from Python packages" +dependencies = [ + "zipp>=3.1.0; python_version < \"3.10\"", +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +requires_python = ">=3.7" +summary = "brain-dead simple config-ini parsing" + +[[package]] +name = "joblib" +version = "1.2.0" +requires_python = ">=3.7" +summary = "Lightweight pipelining with Python functions" + +[[package]] +name = "kiwisolver" +version = "1.4.4" +requires_python = ">=3.7" +summary = "A fast implementation of the Cassowary constraint solver" + +[[package]] +name = "lazy-loader" +version = "0.2" +requires_python = ">=3.7" +summary = "lazy_loader" + +[[package]] +name = "librosa" +version = "0.10.0.post2" +requires_python = ">=3.7" +summary = "Python module for audio and music processing" +dependencies = [ + "audioread>=2.1.9", + "decorator>=4.3.0", + "joblib>=0.14", + "lazy-loader>=0.1", + "msgpack>=1.0", + "numba>=0.51.0", + "numpy!=1.22.0,!=1.22.1,!=1.22.2,>=1.20.3", + "pooch<1.7,>=1.0", + "scikit-learn>=0.20.0", + "scipy>=1.2.0", + "soundfile>=0.12.1", + "soxr>=0.3.2", + "typing-extensions>=4.1.1", +] + +[[package]] +name = "llvmlite" +version = "0.39.1" +requires_python = ">=3.7" +summary = "lightweight wrapper around basic LLVM functionality" + +[[package]] +name = "matplotlib" +version = "3.7.1" +requires_python = ">=3.8" +summary = "Python plotting package" +dependencies = [ + "contourpy>=1.0.1", + "cycler>=0.10", + "fonttools>=4.22.0", + "importlib-resources>=3.2.0; python_version < \"3.10\"", + "kiwisolver>=1.0.1", + "numpy>=1.20", + "packaging>=20.0", + "pillow>=6.2.0", + "pyparsing>=2.3.1", + "python-dateutil>=2.7", +] + +[[package]] +name = "msgpack" +version = "1.0.5" +summary = "MessagePack serializer" + +[[package]] +name = "numba" +version = "0.56.4" +requires_python = ">=3.7" +summary = "compiling Python code using LLVM" +dependencies = [ + "importlib-metadata; python_version < \"3.9\"", + "llvmlite<0.40,>=0.39.0dev0", + "numpy<1.24,>=1.18", + "setuptools", +] + +[[package]] +name = "numpy" +version = "1.23.5" +requires_python = ">=3.8" +summary = "NumPy is the fundamental package for array computing with Python." 
+ +[[package]] +name = "nvidia-cublas-cu11" +version = "11.10.3.66" +requires_python = ">=3" +summary = "CUBLAS native runtime libraries" +dependencies = [ + "setuptools", + "wheel", +] + +[[package]] +name = "nvidia-cuda-nvrtc-cu11" +version = "11.7.99" +requires_python = ">=3" +summary = "NVRTC native runtime libraries" + +[[package]] +name = "nvidia-cuda-runtime-cu11" +version = "11.7.99" +requires_python = ">=3" +summary = "CUDA Runtime native Libraries" +dependencies = [ + "setuptools", + "wheel", +] + +[[package]] +name = "nvidia-cudnn-cu11" +version = "8.5.0.96" +requires_python = ">=3" +summary = "cuDNN runtime libraries" +dependencies = [ + "nvidia-cublas-cu11", +] + +[[package]] +name = "packaging" +version = "23.0" +requires_python = ">=3.7" +summary = "Core utilities for Python packages" + +[[package]] +name = "pandas" +version = "1.5.3" +requires_python = ">=3.8" +summary = "Powerful data structures for data analysis, time series, and statistics" +dependencies = [ + "numpy>=1.20.3; python_version < \"3.10\"", + "numpy>=1.21.0; python_version >= \"3.10\"", + "python-dateutil>=2.8.1", + "pytz>=2020.1", +] + +[[package]] +name = "pillow" +version = "9.4.0" +requires_python = ">=3.7" +summary = "Python Imaging Library (Fork)" + +[[package]] +name = "pluggy" +version = "1.0.0" +requires_python = ">=3.6" +summary = "plugin and hook calling mechanisms for python" + +[[package]] +name = "pooch" +version = "1.6.0" +requires_python = ">=3.6" +summary = "\"Pooch manages your Python library's sample data files: it automatically downloads and stores them in a local directory, with support for versioning and corruption checks.\"" +dependencies = [ + "appdirs>=1.3.0", + "packaging>=20.0", + "requests>=2.19.0", +] + +[[package]] +name = "pycparser" +version = "2.21" +requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +summary = "C parser in Python" + +[[package]] +name = "pyparsing" +version = "3.0.9" +requires_python = ">=3.6.8" +summary = "pyparsing module - Classes and methods to define and execute parsing grammars" + +[[package]] +name = "pytest" +version = "7.2.2" +requires_python = ">=3.7" +summary = "pytest: simple powerful testing with Python" +dependencies = [ + "attrs>=19.2.0", + "colorama; sys_platform == \"win32\"", + "exceptiongroup>=1.0.0rc8; python_version < \"3.11\"", + "iniconfig", + "packaging", + "pluggy<2.0,>=0.12", + "tomli>=1.0.0; python_version < \"3.11\"", +] + +[[package]] +name = "python-dateutil" +version = "2.8.2" +requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +summary = "Extensions to the standard Python datetime module" +dependencies = [ + "six>=1.5", +] + +[[package]] +name = "pytz" +version = "2023.3" +summary = "World timezone definitions, modern and historical" + +[[package]] +name = "requests" +version = "2.28.2" +requires_python = ">=3.7, <4" +summary = "Python HTTP for Humans." 
+dependencies = [ + "certifi>=2017.4.17", + "charset-normalizer<4,>=2", + "idna<4,>=2.5", + "urllib3<1.27,>=1.21.1", +] + +[[package]] +name = "scikit-learn" +version = "1.2.2" +requires_python = ">=3.8" +summary = "A set of python modules for machine learning and data mining" +dependencies = [ + "joblib>=1.1.1", + "numpy>=1.17.3", + "scipy>=1.3.2", + "threadpoolctl>=2.0.0", +] + +[[package]] +name = "scipy" +version = "1.9.3" +requires_python = ">=3.8" +summary = "Fundamental algorithms for scientific computing in Python" +dependencies = [ + "numpy<1.26.0,>=1.18.5", +] + +[[package]] +name = "setuptools" +version = "67.6.1" +requires_python = ">=3.7" +summary = "Easily download, build, install, upgrade, and uninstall Python packages" + +[[package]] +name = "six" +version = "1.16.0" +requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +summary = "Python 2 and 3 compatibility utilities" + +[[package]] +name = "soundfile" +version = "0.12.1" +summary = "An audio library based on libsndfile, CFFI and NumPy" +dependencies = [ + "cffi>=1.0", +] + +[[package]] +name = "soxr" +version = "0.3.4" +requires_python = ">=3.6" +summary = "High quality, one-dimensional sample-rate conversion library" +dependencies = [ + "numpy", +] + +[[package]] +name = "threadpoolctl" +version = "3.1.0" +requires_python = ">=3.6" +summary = "threadpoolctl" + +[[package]] +name = "tomli" +version = "2.0.1" +requires_python = ">=3.7" +summary = "A lil' TOML parser" + +[[package]] +name = "torch" +version = "1.13.1" +requires_python = ">=3.7.0" +summary = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" +dependencies = [ + "nvidia-cublas-cu11==11.10.3.66; platform_system == \"Linux\"", + "nvidia-cuda-nvrtc-cu11==11.7.99; platform_system == \"Linux\"", + "nvidia-cuda-runtime-cu11==11.7.99; platform_system == \"Linux\"", + "nvidia-cudnn-cu11==8.5.0.96; platform_system == \"Linux\"", + "typing-extensions", +] + +[[package]] +name = "torchaudio" +version = "0.13.1" +summary = "An audio package for PyTorch" +dependencies = [ + "torch==1.13.1", +] + +[[package]] +name = "torchvision" +version = "0.14.1" +requires_python = ">=3.7" +summary = "image and video datasets and models for torch deep learning" +dependencies = [ + "numpy", + "pillow!=8.3.*,>=5.3.0", + "requests", + "torch==1.13.1", + "typing-extensions", +] + +[[package]] +name = "typing-extensions" +version = "4.5.0" +requires_python = ">=3.7" +summary = "Backported and Experimental Type Hints for Python 3.7+" + +[[package]] +name = "urllib3" +version = "1.26.15" +requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +summary = "HTTP library with thread-safe connection pooling, file post, and more." 
+ +[[package]] +name = "wheel" +version = "0.40.0" +requires_python = ">=3.7" +summary = "A built-package format for Python" + +[[package]] +name = "zipp" +version = "3.15.0" +requires_python = ">=3.7" +summary = "Backport of pathlib-compatible object wrapper for zip files" + +[metadata] +lock_version = "4.1" +content_hash = "sha256:2401b930c14b3b7e107372f0103cccebff74691b6bcd54148d832ce847df5673" + +[metadata.files] +"appdirs 1.4.4" = [ + {url = "https://files.pythonhosted.org/packages/3b/00/2344469e2084fb287c2e0b57b72910309874c3245463acd6cf5e3db69324/appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, + {url = "https://files.pythonhosted.org/packages/d7/d8/05696357e0311f5b5c316d7b95f46c669dd9c15aaeecbb48c7d0aeb88c40/appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, +] +"attrs 22.2.0" = [ + {url = "https://files.pythonhosted.org/packages/21/31/3f468da74c7de4fcf9b25591e682856389b3400b4b62f201e65f15ea3e07/attrs-22.2.0.tar.gz", hash = "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99"}, + {url = "https://files.pythonhosted.org/packages/fb/6e/6f83bf616d2becdf333a1640f1d463fef3150e2e926b7010cb0f81c95e88/attrs-22.2.0-py3-none-any.whl", hash = "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836"}, +] +"audioread 3.0.0" = [ + {url = "https://files.pythonhosted.org/packages/5d/cb/82a002441902dccbe427406785db07af10182245ee639ea9f4d92907c923/audioread-3.0.0.tar.gz", hash = "sha256:121995bd207eb1fda3d566beb851d3534275925bc35a4fb6da0cb11de0f7251a"}, +] +"certifi 2022.12.7" = [ + {url = "https://files.pythonhosted.org/packages/37/f7/2b1b0ec44fdc30a3d31dfebe52226be9ddc40cd6c0f34ffc8923ba423b69/certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"}, + {url = "https://files.pythonhosted.org/packages/71/4c/3db2b8021bd6f2f0ceb0e088d6b2d49147671f25832fb17970e9b583d742/certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"}, +] +"cffi 1.15.1" = [ + {url = "https://files.pythonhosted.org/packages/00/05/23a265a3db411b0bfb721bf7a116c7cecaf3eb37ebd48a6ea4dfb0a3244d/cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, + {url = "https://files.pythonhosted.org/packages/03/7b/259d6e01a6083acef9d3c8c88990c97d313632bb28fa84d6ab2bb201140a/cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, + {url = "https://files.pythonhosted.org/packages/0e/65/0d7b5dad821ced4dcd43f96a362905a68ce71e6b5f5cfd2fada867840582/cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, + {url = "https://files.pythonhosted.org/packages/0e/e2/a23af3d81838c577571da4ff01b799b0c2bbde24bd924d97e228febae810/cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, + {url = "https://files.pythonhosted.org/packages/10/72/617ee266192223a38b67149c830bd9376b69cf3551e1477abc72ff23ef8e/cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, + {url = 
"https://files.pythonhosted.org/packages/18/8f/5ff70c7458d61fa8a9752e5ee9c9984c601b0060aae0c619316a1e1f1ee5/cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, + {url = "https://files.pythonhosted.org/packages/1d/76/bcebbbab689f5f6fc8a91e361038a3001ee2e48c5f9dbad0a3b64a64cc9e/cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, + {url = "https://files.pythonhosted.org/packages/22/c6/df826563f55f7e9dd9a1d3617866282afa969fe0d57decffa1911f416ed8/cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, + {url = "https://files.pythonhosted.org/packages/23/8b/2e8c2469eaf89f7273ac685164949a7e644cdfe5daf1c036564208c3d26b/cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, + {url = "https://files.pythonhosted.org/packages/2b/a8/050ab4f0c3d4c1b8aaa805f70e26e84d0e27004907c5b8ecc1d31815f92a/cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, + {url = "https://files.pythonhosted.org/packages/2d/86/3ca57cddfa0419f6a95d1c8478f8f622ba597e3581fd501bbb915b20eb75/cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, + {url = "https://files.pythonhosted.org/packages/2e/7a/68c35c151e5b7a12650ecc12fdfb85211aa1da43e9924598451c4a0a3839/cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, + {url = "https://files.pythonhosted.org/packages/32/2a/63cb8c07d151de92ff9d897b2eb27ba6a0e78dda8e4c5f70d7b8c16cd6a2/cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, + {url = "https://files.pythonhosted.org/packages/32/bd/d0809593f7976828f06a492716fbcbbfb62798bbf60ea1f65200b8d49901/cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, + {url = "https://files.pythonhosted.org/packages/37/5a/c37631a86be838bdd84cc0259130942bf7e6e32f70f4cab95f479847fb91/cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, + {url = "https://files.pythonhosted.org/packages/3a/12/d6066828014b9ccb2bbb8e1d9dc28872d20669b65aeb4a86806a0757813f/cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, + {url = "https://files.pythonhosted.org/packages/3a/75/a162315adeaf47e94a3b7f886a8e31d77b9e525a387eef2d6f0efc96a7c8/cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, + {url = "https://files.pythonhosted.org/packages/3f/fa/dfc242febbff049509e5a35a065bdc10f90d8c8585361c2c66b9c2f97a01/cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, + {url = "https://files.pythonhosted.org/packages/43/a0/cc7370ef72b6ee586369bacd3961089ab3d94ae712febf07a244f1448ffd/cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = 
"sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, + {url = "https://files.pythonhosted.org/packages/47/51/3049834f07cd89aceef27f9c56f5394ca6725ae6a15cff5fbdb2f06a24ad/cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, + {url = "https://files.pythonhosted.org/packages/47/97/137f0e3d2304df2060abb872a5830af809d7559a5a4b6a295afb02728e65/cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, + {url = "https://files.pythonhosted.org/packages/50/34/4cc590ad600869502c9838b4824982c122179089ed6791a8b1c95f0ff55e/cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, + {url = "https://files.pythonhosted.org/packages/5b/1a/e1ee5bed11d8b6540c05a8e3c32448832d775364d4461dd6497374533401/cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, + {url = "https://files.pythonhosted.org/packages/5d/4e/4e0bb5579b01fdbfd4388bd1eb9394a989e1336203a4b7f700d887b233c1/cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, + {url = "https://files.pythonhosted.org/packages/5d/6f/3a2e167113eabd46ed300ff3a6a1e9277a3ad8b020c4c682f83e9326fcf7/cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, + {url = "https://files.pythonhosted.org/packages/69/bf/335f8d95510b1a26d7c5220164dc739293a71d5540ecd54a2f66bac3ecb8/cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, + {url = "https://files.pythonhosted.org/packages/71/d7/0fe0d91b0bbf610fb7254bb164fa8931596e660d62e90fb6289b7ee27b09/cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, + {url = "https://files.pythonhosted.org/packages/77/b7/d3618d612be01e184033eab90006f8ca5b5edafd17bf247439ea4e167d8a/cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, + {url = "https://files.pythonhosted.org/packages/79/4b/33494eb0adbcd884656c48f6db0c98ad8a5c678fb8fb5ed41ab546b04d8c/cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, + {url = "https://files.pythonhosted.org/packages/7c/3e/5d823e5bbe00285e479034bcad44177b7353ec9fdcd7795baac5ccf82950/cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, + {url = "https://files.pythonhosted.org/packages/85/1f/a3c533f8d377da5ca7edb4f580cc3edc1edbebc45fac8bb3ae60f1176629/cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, + {url = "https://files.pythonhosted.org/packages/87/4b/64e8bd9d15d6b22b6cb11997094fbe61edf453ea0a97c8675cb7d1c3f06f/cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, + {url = 
"https://files.pythonhosted.org/packages/87/ee/ddc23981fc0f5e7b5356e98884226bcb899f95ebaefc3e8e8b8742dd7e22/cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, + {url = "https://files.pythonhosted.org/packages/88/89/c34caf63029fb7628ec2ebd5c88ae0c9bd17db98c812e4065a4d020ca41f/cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, + {url = "https://files.pythonhosted.org/packages/91/bc/b7723c2fe7a22eee71d7edf2102cd43423d5f95ff3932ebaa2f82c7ec8d0/cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, + {url = "https://files.pythonhosted.org/packages/93/d0/2e2b27ea2f69b0ec9e481647822f8f77f5fc23faca2dd00d1ff009940eb7/cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, + {url = "https://files.pythonhosted.org/packages/9f/52/1e2b43cfdd7d9a39f48bc89fcaee8d8685b1295e205a4f1044909ac14d89/cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, + {url = "https://files.pythonhosted.org/packages/a4/42/54bdf22cf6c8f95113af645d0bd7be7f9358ea5c2d57d634bb11c6b4d0b2/cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, + {url = "https://files.pythonhosted.org/packages/a8/16/06b84a7063a4c0a2b081030fdd976022086da9c14e80a9ed4ba0183a98a9/cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, + {url = "https://files.pythonhosted.org/packages/a9/ba/e082df21ebaa9cb29f2c4e1d7e49a29b90fcd667d43632c6674a16d65382/cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, + {url = "https://files.pythonhosted.org/packages/aa/02/ab15b3aa572759df752491d5fa0f74128cd14e002e8e3257c1ab1587810b/cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, + {url = "https://files.pythonhosted.org/packages/ad/26/7b3a73ab7d82a64664c7c4ea470e4ec4a3c73bb4f02575c543a41e272de5/cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, + {url = "https://files.pythonhosted.org/packages/af/cb/53b7bba75a18372d57113ba934b27d0734206c283c1dfcc172347fbd9f76/cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, + {url = "https://files.pythonhosted.org/packages/af/da/9441d56d7dd19d07dcc40a2a5031a1f51c82a27cee3705edf53dadcac398/cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, + {url = "https://files.pythonhosted.org/packages/b3/b8/89509b6357ded0cbacc4e430b21a4ea2c82c2cdeb4391c148b7c7b213bed/cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, + {url = "https://files.pythonhosted.org/packages/b5/7d/df6c088ef30e78a78b0c9cca6b904d5abb698afb5bc8f5191d529d83d667/cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, + {url = "https://files.pythonhosted.org/packages/b5/80/ce5ba093c2475a73df530f643a61e2969a53366e372b24a32f08cd10172b/cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, + {url = "https://files.pythonhosted.org/packages/b7/8b/06f30caa03b5b3ac006de4f93478dbd0239e2a16566d81a106c322dc4f79/cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, + {url = "https://files.pythonhosted.org/packages/b9/4a/dde4d093a3084d0b0eadfb2703f71e31a5ced101a42c839ac5bbbd1710f2/cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, + {url = "https://files.pythonhosted.org/packages/c1/25/16a082701378170559bb1d0e9ef2d293cece8dc62913d79351beb34c5ddf/cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, + {url = "https://files.pythonhosted.org/packages/c2/0b/3b09a755ddb977c167e6d209a7536f6ade43bb0654bad42e08df1406b8e4/cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, + {url = "https://files.pythonhosted.org/packages/c5/ff/3f9d73d480567a609e98beb0c64359f8e4f31cb6a407685da73e5347b067/cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, + {url = "https://files.pythonhosted.org/packages/c6/3d/dd085bb831b22ce4d0b7ba8550e6d78960f02f770bbd1314fea3580727f8/cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, + {url = "https://files.pythonhosted.org/packages/c9/e3/0a52838832408cfbbf3a59cb19bcd17e64eb33795c9710ca7d29ae10b5b7/cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, + {url = "https://files.pythonhosted.org/packages/d3/56/3e94aa719ae96eeda8b68b3ec6e347e0a23168c6841dc276ccdcdadc9f32/cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, + {url = "https://files.pythonhosted.org/packages/d3/e1/e55ca2e0dd446caa2cc8f73c2b98879c04a1f4064ac529e1836683ca58b8/cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, + {url = "https://files.pythonhosted.org/packages/da/ff/ab939e2c7b3f40d851c0f7192c876f1910f3442080c9c846532993ec3cef/cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, + {url = "https://files.pythonhosted.org/packages/df/02/aef53d4aa43154b829e9707c8c60bab413cd21819c4a36b0d7aaa83e2a61/cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, + {url = "https://files.pythonhosted.org/packages/e8/ff/c4b7a358526f231efa46a375c959506c87622fb4a2c5726e827c55e6adf2/cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, + {url = 
"https://files.pythonhosted.org/packages/ea/be/c4ad40ad441ac847b67c7a37284ae3c58f39f3e638c6b0f85fb662233825/cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, + {url = "https://files.pythonhosted.org/packages/ed/a3/c5f01988ddb70a187c3e6112152e01696188c9f8a4fa4c68aa330adbb179/cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, + {url = "https://files.pythonhosted.org/packages/ef/41/19da352d341963d29a33bdb28433ba94c05672fb16155f794fad3fd907b0/cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, + {url = "https://files.pythonhosted.org/packages/f9/96/fc9e118c47b7adc45a0676f413b4a47554e5f3b6c99b8607ec9726466ef1/cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, + {url = "https://files.pythonhosted.org/packages/ff/fe/ac46ca7b00e9e4f9c62e7928a11bc9227c86e2ff43526beee00cdfb4f0e8/cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, +] +"charset-normalizer 3.1.0" = [ + {url = "https://files.pythonhosted.org/packages/00/47/f14533da238134f5067fb1d951eb03d5c4be895d6afb11c7ebd07d111acb/charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"}, + {url = "https://files.pythonhosted.org/packages/01/c7/0407de35b70525dba2a58a2724a525cf882ee76c3d2171d834463c5d2881/charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"}, + {url = "https://files.pythonhosted.org/packages/05/f3/86b5fcb5c8fe8b4231362918a7c4d8f549c56561c5fdb495a3c5b41c6862/charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"}, + {url = "https://files.pythonhosted.org/packages/07/6b/98d41a0221991a806e88c95bfeecf8935fbf465b02eb4b469770d572183a/charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"}, + {url = "https://files.pythonhosted.org/packages/0a/67/8d3d162ec6641911879651cdef670c3c6136782b711d7f8e82e2fffe06e0/charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"}, + {url = "https://files.pythonhosted.org/packages/12/12/c5c39f5a149cd6788d2e40cea5618bae37380e2754fcdf53dc9e01bdd33a/charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"}, + {url = "https://files.pythonhosted.org/packages/12/68/4812f9b05ac0a2b7619ac3dd7d7e3fc52c12006b84617021c615fc2fcf42/charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"}, + {url = "https://files.pythonhosted.org/packages/13/b7/21729a6d512246aa0bb872b90aea0d9fcd1b293762cdb1d1d33c01140074/charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"}, + {url = 
"https://files.pythonhosted.org/packages/16/58/19fd2f62e6ff44ba0db0cd44b584790555e2cde09293149f4409d654811b/charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"}, + {url = "https://files.pythonhosted.org/packages/18/36/7ae10a3dd7f9117b61180671f8d1e4802080cca88ad40aaabd3dad8bab0e/charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"}, + {url = "https://files.pythonhosted.org/packages/1c/9b/de2adc43345623da8e7c958719528a42b6d87d2601017ce1187d43b8a2d7/charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"}, + {url = "https://files.pythonhosted.org/packages/1f/be/c6c76cf8fcf6918922223203c83ba8192eff1c6a709e8cfec7f5ca3e7d2d/charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"}, + {url = "https://files.pythonhosted.org/packages/21/16/1b0d8fdcb81bbf180976af4f867ce0f2244d303ab10d452fde361dec3b5c/charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"}, + {url = "https://files.pythonhosted.org/packages/23/13/cf5d7bb5bc95f120df64d6c470581189df51d7f011560b2a06a395b7a120/charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"}, + {url = "https://files.pythonhosted.org/packages/26/20/83e1804a62b25891c4e770c94d9fd80233bbb3f2a51c4fadee7a196e5a5b/charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"}, + {url = "https://files.pythonhosted.org/packages/2c/2f/ec805104098085728b7cb610deede7195c6fa59f51942422f02cc427b6f6/charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"}, + {url = "https://files.pythonhosted.org/packages/2e/25/3eab2b38fef9ae59f7b4e9c1e62eb50609d911867e5acabace95fe25c0b1/charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"}, + {url = "https://files.pythonhosted.org/packages/31/8b/81c3515a69d06b501fcce69506af57a7a19bd9f42cabd1a667b1b40f2c55/charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"}, + {url = "https://files.pythonhosted.org/packages/33/10/c87ba15f779f8251ae55fa147631339cd91e7af51c3c133d2687c6e41800/charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"}, + {url = "https://files.pythonhosted.org/packages/33/97/9967fb2d364a9da38557e4af323abcd58cc05bdd8f77e9fd5ae4882772cc/charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"}, + {url = "https://files.pythonhosted.org/packages/45/3d/fa2683f5604f99fba5098a7313e5d4846baaecbee754faf115907f21a85f/charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"}, + {url = "https://files.pythonhosted.org/packages/4e/11/f7077d78b18aca8ea3186a706c0221aa2bc34c442a3d3bdf3ad401a29052/charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"}, + {url = "https://files.pythonhosted.org/packages/4f/18/92866f050f7114ba38aba4f4a69f83cc2a25dc2e5a8af4b44fd1bfd6d528/charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"}, + {url = "https://files.pythonhosted.org/packages/4f/7c/af43743567a7da2a069b4f9fa31874c3c02b963cd1fb84fe1e7568a567e6/charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"}, + {url = "https://files.pythonhosted.org/packages/4f/a2/9031ba4a008e11a21d7b7aa41751290d2f2035a2f14ecb6e589771a17c47/charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"}, + {url = "https://files.pythonhosted.org/packages/56/24/5f2dedcf3d0673931b6200c410832ae44b376848bc899dbf1fa6c91c4ebe/charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"}, + {url = "https://files.pythonhosted.org/packages/5d/2b/4d8c80400c04ae3c8dbc847de092e282b5c7b17f8f9505d68bb3e5815c71/charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"}, + {url = "https://files.pythonhosted.org/packages/61/e3/ad9ae58b28482d1069eba1edec2be87701f5dd6fd6024a665020d66677a0/charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"}, + {url = "https://files.pythonhosted.org/packages/67/30/dbab1fe5ab2ce5d3d517ad9936170d896e9687f3860a092519f1fe359812/charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"}, + {url = "https://files.pythonhosted.org/packages/67/df/660e9665ace7ad711e275194a86cb757fb4d4e513fae5ff3d39573db4984/charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"}, + {url = "https://files.pythonhosted.org/packages/68/77/af702eba147ba963b27eb00832cef6b8c4cb9fcf7404a476993876434b93/charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"}, + {url = "https://files.pythonhosted.org/packages/69/22/66351781e668158feef71c5e3b059a79ecc9efc3ef84a45888b0f3a933d5/charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"}, + {url = "https://files.pythonhosted.org/packages/6d/59/59a3f4d8a59ee270da77f9e954a0e284c9d6884d39ec69d696d9aa5ff2f2/charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"}, + {url = 
"https://files.pythonhosted.org/packages/72/90/667a6bc6abe42fc10adf4cd2c1e1c399d78e653dbac4c8018350843d4ab7/charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"}, + {url = "https://files.pythonhosted.org/packages/74/5f/361202de730532028458b729781b8435f320e31a622c27f30e25eec80513/charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"}, + {url = "https://files.pythonhosted.org/packages/74/f1/d0b8385b574f7e086fb6709e104b696707bd3742d54a6caf0cebbb7e975b/charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"}, + {url = "https://files.pythonhosted.org/packages/76/ad/516fed8ffaf02e7a01cd6f6e9d101a6dec64d4db53bec89d30802bf30a96/charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"}, + {url = "https://files.pythonhosted.org/packages/82/b9/51b66a647be8685dee75b7807e0f750edf5c1e4f29bc562ad285c501e3c7/charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"}, + {url = "https://files.pythonhosted.org/packages/84/23/f60cda6c70ae922ad78368982f06e7fef258fba833212f26275fe4727dc4/charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"}, + {url = "https://files.pythonhosted.org/packages/85/e8/18d408d8fe29a56012c10d6b15960940b83f06620e9d7481581cdc6d9901/charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"}, + {url = "https://files.pythonhosted.org/packages/94/70/23981e7bf098efbc4037e7c66d28a10e950d9296c08c6dea8ef290f9c79e/charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"}, + {url = "https://files.pythonhosted.org/packages/9a/f1/ff81439aa09070fee64173e6ca6ce1342f2b1cca997bcaae89e443812684/charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"}, + {url = "https://files.pythonhosted.org/packages/9e/62/a1e0a8f8830c92014602c8a88a1a20b8a68d636378077381f671e6e1cec9/charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"}, + {url = "https://files.pythonhosted.org/packages/a2/6c/5167f08da5298f383036c33cb749ab5b3405fd07853edc8314c6882c01b8/charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"}, + {url = "https://files.pythonhosted.org/packages/a4/03/355281b62c26712a50c6a9dd75339d8cdd58488fd7bf2556ba1320ebd315/charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"}, + {url = "https://files.pythonhosted.org/packages/a9/83/138d2624fdbcb62b7e14715eb721d44347e41a1b4c16544661e940793f49/charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"}, + {url = 
"https://files.pythonhosted.org/packages/ac/7f/62d5dff4e9cb993e4b0d4ea78a74cc84d7d92120879529e0ce0965765936/charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"}, + {url = "https://files.pythonhosted.org/packages/ac/c5/990bc41a98b7fa2677c665737fdf278bb74ad4b199c56b6b564b3d4cbfc5/charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"}, + {url = "https://files.pythonhosted.org/packages/ad/83/994bfca99e29f1bab66b9248e739360ee70b5aae0a5ee488cd776501edbc/charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"}, + {url = "https://files.pythonhosted.org/packages/b0/55/d8ef4c8c7d2a8b3a16e7d9b03c59475c2ee96a0e0c90b14c99faaac0ee3b/charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"}, + {url = "https://files.pythonhosted.org/packages/bb/dc/58fdef3ab85e8e7953a8b89ef1d2c06938b8ad88d9617f22967e1a90e6b8/charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"}, + {url = "https://files.pythonhosted.org/packages/bc/08/7e7c97399806366ca515a049c3a1e4b644a6a2048bed16e5e67bfaafd0aa/charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"}, + {url = "https://files.pythonhosted.org/packages/bc/92/ac692a303e53cdc8852ce72b1ac364b493ca5c9206a5c8db5b30a7f3019c/charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"}, + {url = "https://files.pythonhosted.org/packages/c2/35/dfb4032f5712747d3dcfdd19d0768f6d8f60910ae24ed066ecbf442be013/charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"}, + {url = "https://files.pythonhosted.org/packages/c6/ab/43ea052756b2f2dcb6a131897811c0e2704b0288f090336217d3346cd682/charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"}, + {url = "https://files.pythonhosted.org/packages/c9/8c/a76dd9f2c8803eb147e1e715727f5c3ba0ef39adaadf66a7b3698c113180/charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"}, + {url = "https://files.pythonhosted.org/packages/cc/f6/21a66e524658bd1dd7b89ac9d1ee8f7823f2d9701a2fbc458ab9ede53c63/charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"}, + {url = "https://files.pythonhosted.org/packages/d1/ff/51fe7e6446415f143b159740c727850172bc35622b2a06dde3354bdebaf3/charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"}, + {url = "https://files.pythonhosted.org/packages/d5/92/86c0f0e66e897f6818c46dadef328a5b345d061688f9960fc6ca1fd03dbe/charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"}, + {url = 
"https://files.pythonhosted.org/packages/d7/4c/37ad75674e8c6bc22ab01bef673d2d6e46ee44203498c9a26aa23959afe5/charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"}, + {url = "https://files.pythonhosted.org/packages/d8/ca/a7ff600781bf1e5f702ba26bb82f2ba1d3a873a3f8ad73cc44c79dfaefa9/charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"}, + {url = "https://files.pythonhosted.org/packages/dd/39/6276cf5a395ffd39b77dadf0e2fcbfca8dbfe48c56ada250c40086055143/charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"}, + {url = "https://files.pythonhosted.org/packages/e1/7c/398600268fc98b7e007f5a716bd60903fff1ecff75e45f5700212df5cd76/charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"}, + {url = "https://files.pythonhosted.org/packages/e1/b4/53678b2a14e0496fc167fe9b9e726ad33d670cfd2011031aa5caeee6b784/charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"}, + {url = "https://files.pythonhosted.org/packages/e5/aa/9d2d60d6a566423da96c15cd11cbb88a70f9aff9a4db096094ee19179cab/charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"}, + {url = "https://files.pythonhosted.org/packages/e6/98/a3f65f57651da1cecaed91d6f75291995d56c97442fa2a43d2a421139adf/charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"}, + {url = "https://files.pythonhosted.org/packages/ea/38/d31c7906c4be13060c1a5034087966774ef33ab57ff2eee76d71265173c3/charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"}, + {url = "https://files.pythonhosted.org/packages/ef/81/14b3b8f01ddaddad6cdec97f2f599aa2fa466bd5ee9af99b08b7713ccd29/charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"}, + {url = "https://files.pythonhosted.org/packages/f2/b7/e21e16c98575616f4ce09dc766dbccdac0ca119c176b184d46105e971a84/charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"}, + {url = "https://files.pythonhosted.org/packages/f2/d7/6ee92c11eda3f3c9cac1e059901092bfdf07388be7d2e60ac627527eee62/charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"}, + {url = "https://files.pythonhosted.org/packages/f4/0a/8c03913ed1eca9d831db0c28759edb6ce87af22bb55dbc005a52525a75b6/charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"}, + {url = "https://files.pythonhosted.org/packages/f6/0f/de1c4030fd669e6719277043e3b0f152a83c118dd1020cf85b51d443d04a/charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"}, + {url = 
"https://files.pythonhosted.org/packages/f8/ed/500609cb2457b002242b090c814549997424d72690ef3058cfdfca91f68b/charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"}, + {url = "https://files.pythonhosted.org/packages/fa/8e/2e5c742c3082bce3eea2ddd5b331d08050cda458bc362d71c48e07a44719/charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"}, + {url = "https://files.pythonhosted.org/packages/ff/d7/8d757f8bd45be079d76309248845a04f09619a7b17d6dfc8c9ff6433cac2/charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"}, +] +"click 8.1.3" = [ + {url = "https://files.pythonhosted.org/packages/59/87/84326af34517fca8c58418d148f2403df25303e02736832403587318e9e8/click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, + {url = "https://files.pythonhosted.org/packages/c2/f1/df59e28c642d583f7dacffb1e0965d0e00b218e0186d7858ac5233dce840/click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, +] +"colorama 0.4.6" = [ + {url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] +"contourpy 1.0.7" = [ + {url = "https://files.pythonhosted.org/packages/02/4d/009c25f6a3f27dab8fabd5e0f9eeb2bc2697bfcf533e9d07ee825d7fae22/contourpy-1.0.7-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f99e9486bf1bb979d95d5cffed40689cb595abb2b841f2991fc894b3452290e8"}, + {url = "https://files.pythonhosted.org/packages/03/a4/0119e530f7926377d283ed742b120ef5cf3f37f7c5aef5e77cfc59ebabfc/contourpy-1.0.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:130230b7e49825c98edf0b428b7aa1125503d91732735ef897786fe5452b1ec2"}, + {url = "https://files.pythonhosted.org/packages/08/ce/9bfe9f028cb5a8ee97898da52f4905e0e2d9ca8203ffdcdbe80e1769b549/contourpy-1.0.7-cp38-cp38-win_amd64.whl", hash = "sha256:57119b0116e3f408acbdccf9eb6ef19d7fe7baf0d1e9aaa5381489bc1aa56556"}, + {url = "https://files.pythonhosted.org/packages/09/c4/72ffdbea5f0f2a89e544b5e91793548488b892855c170f89f4b2d8d0597e/contourpy-1.0.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a9d7587d2fdc820cc9177139b56795c39fb8560f540bba9ceea215f1f66e1566"}, + {url = "https://files.pythonhosted.org/packages/17/22/ae833bbd6ec6dc4b2134d095332dc9853d8ab81c9ced3ec18f1db1942134/contourpy-1.0.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5dd34c1ae752515318224cba7fc62b53130c45ac6a1040c8b7c1a223c46e8967"}, + {url = "https://files.pythonhosted.org/packages/26/df/b5c53b350d9f8c8672fa96a756c12445854be430469a92ca081dfc0f3585/contourpy-1.0.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b8d587cc39057d0afd4166083d289bdeff221ac6d3ee5046aef2d480dc4b503c"}, + {url = "https://files.pythonhosted.org/packages/2f/e2/02a1b7aa790981af054917154e4c35d5c00fdfaa018b77369758c08918c4/contourpy-1.0.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:0f9d350b639db6c2c233d92c7f213d94d2e444d8e8fc5ca44c9706cf72193772"}, + {url = "https://files.pythonhosted.org/packages/30/99/a966df6cb28bab6090527e562682067737c5c6816ffcd7a02812e4a4ffdd/contourpy-1.0.7-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:31a55dccc8426e71817e3fe09b37d6d48ae40aae4ecbc8c7ad59d6893569c436"}, + {url = "https://files.pythonhosted.org/packages/31/d7/247a889a9c425197aeac5e31286f3050dee63aa3466c939aa302cdb2b6cb/contourpy-1.0.7-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9e20e5a1908e18aaa60d9077a6d8753090e3f85ca25da6e25d30dc0a9e84c2c6"}, + {url = "https://files.pythonhosted.org/packages/33/2e/1338f7b7ba17815c00507d0ace2804e37eb85a8c340fd64da5e38690c6d1/contourpy-1.0.7-cp311-cp311-win32.whl", hash = "sha256:4ee3ee247f795a69e53cd91d927146fb16c4e803c7ac86c84104940c7d2cabf0"}, + {url = "https://files.pythonhosted.org/packages/50/de/28740ce2298fee83d7ce2c935a122c8f38e46b6a904e7533ef32e7206e96/contourpy-1.0.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:95c3acddf921944f241b6773b767f1cbce71d03307270e2d769fd584d5d1092d"}, + {url = "https://files.pythonhosted.org/packages/54/d0/27e77c2028f9df32184427d73f4547d8cb1aca5087e013de1ad414dd3183/contourpy-1.0.7-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:71b0bf0c30d432278793d2141362ac853859e87de0a7dee24a1cea35231f0d50"}, + {url = "https://files.pythonhosted.org/packages/55/31/be8029093f8b1181f59f4d1f0438a7c60babaf6230947edb387e09ed5c1e/contourpy-1.0.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:fc1464c97579da9f3ab16763c32e5c5d5bb5fa1ec7ce509a4ca6108b61b84fab"}, + {url = "https://files.pythonhosted.org/packages/59/65/33affcc4d0e1459eaa66f057260076fecd418aa00167f95670e1fbbf597a/contourpy-1.0.7-cp39-cp39-win32.whl", hash = "sha256:c5210e5d5117e9aec8c47d9156d1d3835570dd909a899171b9535cb4a3f32693"}, + {url = "https://files.pythonhosted.org/packages/59/f6/d1b30d463175af6316e30c45e4618aaabb4d302fd53308fa7d7a62c8f677/contourpy-1.0.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:64757f6460fc55d7e16ed4f1de193f362104285c667c112b50a804d482777edd"}, + {url = "https://files.pythonhosted.org/packages/5a/49/05e1215b1a528db06e4cb84d11aef00f0256ccd7b4a13a9132973e27aa62/contourpy-1.0.7-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:24847601071f740837aefb730e01bd169fbcaa610209779a78db7ebb6e6a7051"}, + {url = "https://files.pythonhosted.org/packages/63/e6/15b60f93ba888278381cf0cb8f04a988c97f52c3dd235abf9a157b959d79/contourpy-1.0.7-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abf298af1e7ad44eeb93501e40eb5a67abbf93b5d90e468d01fc0c4451971afa"}, + {url = "https://files.pythonhosted.org/packages/70/a7/22a5fe12c38e978b941719b04cd81085877eb567165b93358193ec1b3bdc/contourpy-1.0.7-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ce41676b3d0dd16dbcfabcc1dc46090aaf4688fd6e819ef343dbda5a57ef0161"}, + {url = "https://files.pythonhosted.org/packages/72/2e/4d50b842a8747776dcd172f9c19514800844d1e67dd1dfdb41c1f74a8b58/contourpy-1.0.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:9056c5310eb1daa33fc234ef39ebfb8c8e2533f088bbf0bc7350f70a29bde1ac"}, + {url = "https://files.pythonhosted.org/packages/81/ac/44b8499389fa3d88fa38fe3301a5b7e22352f1b642cf72f25dc457e9f4b2/contourpy-1.0.7-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b6d0f9e1d39dbfb3977f9dd79f156c86eb03e57a7face96f199e02b18e58d32a"}, + {url = 
"https://files.pythonhosted.org/packages/82/42/6084f3424d47cc47c3eecf926ea2718fcc3cefd5ddd599964f2bccc74b96/contourpy-1.0.7-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69f8ff4db108815addd900a74df665e135dbbd6547a8a69333a68e1f6e368ac2"}, + {url = "https://files.pythonhosted.org/packages/82/5b/5eaf7098f38f1b98ed56993e87dd34a5c64e6abff6d4f11394ca2091e600/contourpy-1.0.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6381fa66866b0ea35e15d197fc06ac3840a9b2643a6475c8fff267db8b9f1e69"}, + {url = "https://files.pythonhosted.org/packages/89/70/b1490db2282e28fef85a29e17ffa976efa621b24e0e36774248805125a5f/contourpy-1.0.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fd7dc0e6812b799a34f6d12fcb1000539098c249c8da54f3566c6a6461d0dbad"}, + {url = "https://files.pythonhosted.org/packages/8b/f0/eb4bce3032b612a920a044b654164040c3392d3eaa95ec482895815a0f51/contourpy-1.0.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:769eef00437edf115e24d87f8926955f00f7704bede656ce605097584f9966dc"}, + {url = "https://files.pythonhosted.org/packages/8d/cc/c8e32001298b50331348312ac2a965279ddf1c20d25e68ca596fd8a7aaa2/contourpy-1.0.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c71fdd8f1c0f84ffd58fca37d00ca4ebaa9e502fb49825484da075ac0b0b803"}, + {url = "https://files.pythonhosted.org/packages/8e/d2/38b3da76c0a654dac29f7768a870b930be9a0d35fb469acb86f8d0aaeb54/contourpy-1.0.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efb8f6d08ca7998cf59eaf50c9d60717f29a1a0a09caa46460d33b2924839dbd"}, + {url = "https://files.pythonhosted.org/packages/95/f1/7e052a263afca2a36417957b7acb56290599458b84135b504dc3ef4ca88d/contourpy-1.0.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:366a0cf0fc079af5204801786ad7a1c007714ee3909e364dbac1729f5b0849e5"}, + {url = "https://files.pythonhosted.org/packages/a5/54/307c937af1875abf17d007e738f244fe128a85f1ac82bbd8876a41b84261/contourpy-1.0.7-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6c180d89a28787e4b73b07e9b0e2dac7741261dbdca95f2b489c4f8f887dd810"}, + {url = "https://files.pythonhosted.org/packages/a7/40/0aed6d92734ffad008a841b43723ca0216292df27b706de0afbf7a84dff4/contourpy-1.0.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ed33433fc3820263a6368e532f19ddb4c5990855e4886088ad84fd7c4e561c71"}, + {url = "https://files.pythonhosted.org/packages/af/5b/1030d528eea1ba29b18681085086ae8c255aada1d38b4809bdc39d4131e0/contourpy-1.0.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:031154ed61f7328ad7f97662e48660a150ef84ee1bc8876b6472af88bf5a9b98"}, + {url = "https://files.pythonhosted.org/packages/b1/5e/9da7dd3f5916f63b7cacb5d13a2eff294b3041cfbae5bc296991df8aa784/contourpy-1.0.7-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:152fd8f730c31fd67fe0ffebe1df38ab6a669403da93df218801a893645c6ccc"}, + {url = "https://files.pythonhosted.org/packages/b3/0c/0840a89d63cc0866a5118367ae1c789269e350682e6f4aceee5a1f3d608d/contourpy-1.0.7-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:efe99298ba37e37787f6a2ea868265465410822f7bea163edcc1bd3903354ea9"}, + {url = "https://files.pythonhosted.org/packages/b4/9b/6edb9d3e334a70a212f66a844188fcb57ddbd528cbc3b1fe7abfc317ddd7/contourpy-1.0.7.tar.gz", hash = "sha256:d8165a088d31798b59e91117d1f5fc3df8168d8b48c4acc10fc0df0d0bdbcc5e"}, + {url = "https://files.pythonhosted.org/packages/b6/4b/18a8a0c4d4f935d3711fe1325d4f0b5277886bcef01ced6ecc45074c3f19/contourpy-1.0.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:54d43960d809c4c12508a60b66cb936e7ed57d51fb5e30b513934a4a23874fae"}, + {url = "https://files.pythonhosted.org/packages/b6/b8/6894c9e851f7442ebbc054537f56021c9ebc0691799ac4b92e380f3a2712/contourpy-1.0.7-cp310-cp310-win_amd64.whl", hash = "sha256:3caea6365b13119626ee996711ab63e0c9d7496f65641f4459c60a009a1f3e80"}, + {url = "https://files.pythonhosted.org/packages/c4/27/90f82ec9667b3b4fceced99e11c3519879e949ecb74ff976567cf1e5ba7d/contourpy-1.0.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ae90d5a8590e5310c32a7630b4b8618cef7563cebf649011da80874d0aa8f414"}, + {url = "https://files.pythonhosted.org/packages/c7/97/ba9ace011734cd01b63eb7d39b2cf97afbfa985b0239ab0db85bafa9b207/contourpy-1.0.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7281244c99fd7c6f27c1c6bfafba878517b0b62925a09b586d88ce750a016d2"}, + {url = "https://files.pythonhosted.org/packages/ca/37/fb73c2052d498f61c2208b5190c209534d2afe89980f6a567e2c0e946304/contourpy-1.0.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:30676ca45084ee61e9c3da589042c24a57592e375d4b138bd84d8709893a1ba4"}, + {url = "https://files.pythonhosted.org/packages/cb/6c/cef46debcbe1cc2072f6367f4430e55331df5776a8d2ee9eb6b33a3d160f/contourpy-1.0.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89ba9bb365446a22411f0673abf6ee1fea3b2cf47b37533b970904880ceb72f3"}, + {url = "https://files.pythonhosted.org/packages/cc/89/fae9ae6d8e9d1149bed7b0377a4ee77a40293bdd8b681212ab4af2c3fbb2/contourpy-1.0.7-cp310-cp310-win32.whl", hash = "sha256:3c184ad2433635f216645fdf0493011a4667e8d46b34082f5a3de702b6ec42e3"}, + {url = "https://files.pythonhosted.org/packages/d0/4f/ebdb24671582b56c953f79b6b1261adc0fdf6f7ec8f30cc45efefd5dbcc9/contourpy-1.0.7-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a1e97b86f73715e8670ef45292d7cc033548266f07d54e2183ecb3c87598888f"}, + {url = "https://files.pythonhosted.org/packages/d3/b1/e0151100124d28729622bf714462c76b2bce38e136215d9236863d130eb9/contourpy-1.0.7-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58569c491e7f7e874f11519ef46737cea1d6eda1b514e4eb5ac7dab6aa864d02"}, + {url = "https://files.pythonhosted.org/packages/d5/d6/6feb6ddca04c3459beaf126a81e5921b944300d5c926e439327590ab26fb/contourpy-1.0.7-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a011cf354107b47c58ea932d13b04d93c6d1d69b8b6dce885e642531f847566"}, + {url = "https://files.pythonhosted.org/packages/e0/10/12f2e41e84841a825b31d91c74f64761be470953823b87e340c898dffd92/contourpy-1.0.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7f6979d20ee5693a1057ab53e043adffa1e7418d734c1532e2d9e915b08d8ec2"}, + {url = "https://files.pythonhosted.org/packages/e3/95/08d6e4c5f53411fdc4ef48b451a6427d68ec761865436e84ab77a0d64db3/contourpy-1.0.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e96a08b62bb8de960d3a6afbc5ed8421bf1a2d9c85cc4ea73f4bc81b4910500f"}, + {url = "https://files.pythonhosted.org/packages/ea/75/3ed26ede7745109880373de515a273e6dbe43d31960279982fac6d6ddf1d/contourpy-1.0.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3e927b3868bd1e12acee7cc8f3747d815b4ab3e445a28d2e5373a7f4a6e76ba1"}, + {url = "https://files.pythonhosted.org/packages/ea/d6/5be880ae773716ec35863e034d47914de5083cdd2da97fd6c22f84ec9245/contourpy-1.0.7-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc331c13902d0f50845099434cd936d49d7a2ca76cb654b39691974cb1e4812d"}, + {url = 
"https://files.pythonhosted.org/packages/ec/56/7736333adc941087b0f86db37b0dffce83fd4e35400ab86ce1bf0690d04f/contourpy-1.0.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8acf74b5d383414401926c1598ed77825cd530ac7b463ebc2e4f46638f56cce6"}, + {url = "https://files.pythonhosted.org/packages/ec/59/5eac40e348a7bf803cea221bcd27f74a49cb81667b400fdfbb680e86e7bb/contourpy-1.0.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87f4d8941a9564cda3f7fa6a6cd9b32ec575830780677932abdec7bcb61717b0"}, + {url = "https://files.pythonhosted.org/packages/ed/71/546cbcae0cc0653b33afe445a1215f8dddea86f4dd8b31834008588eb8d7/contourpy-1.0.7-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e9ebb4425fc1b658e13bace354c48a933b842d53c458f02c86f371cecbedecc"}, + {url = "https://files.pythonhosted.org/packages/f2/de/7ddc513caca0e287434cd389855a5d2e185c22685fb1dc6789169dd858be/contourpy-1.0.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a877ada905f7d69b2a31796c4b66e31a8068b37aa9b78832d41c82fc3e056ddd"}, + {url = "https://files.pythonhosted.org/packages/f3/a9/3640440269719283a250df109a7f91b48d657bf9c0ceb5fe950eb894ecf7/contourpy-1.0.7-cp311-cp311-win_amd64.whl", hash = "sha256:5caeacc68642e5f19d707471890f037a13007feba8427eb7f2a60811a1fc1350"}, + {url = "https://files.pythonhosted.org/packages/f9/a1/d5c6350a39a2cf221236883d3c6f2b50e3ef5e4f4b7ebf06ee280521a32d/contourpy-1.0.7-cp39-cp39-win_amd64.whl", hash = "sha256:60835badb5ed5f4e194a6f21c09283dd6e007664a86101431bf870d9e86266c4"}, + {url = "https://files.pythonhosted.org/packages/f9/ca/e9208ba62f5c14d950273d2d4da75aa9f3879809d6813b058514fc5dcccb/contourpy-1.0.7-cp38-cp38-win32.whl", hash = "sha256:62398c80ef57589bdbe1eb8537127321c1abcfdf8c5f14f479dbbe27d0322e66"}, + {url = "https://files.pythonhosted.org/packages/fa/56/ab73a8bab463df907ac2c2249bfee428900e2b88e28ccf5ab059c106e07c/contourpy-1.0.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:38e2e577f0f092b8e6774459317c05a69935a1755ecfb621c0a98f0e3c09c9a5"}, +] +"cycler 0.11.0" = [ + {url = "https://files.pythonhosted.org/packages/34/45/a7caaacbfc2fa60bee42effc4bcc7d7c6dbe9c349500e04f65a861c15eb9/cycler-0.11.0.tar.gz", hash = "sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f"}, + {url = "https://files.pythonhosted.org/packages/5c/f9/695d6bedebd747e5eb0fe8fad57b72fdf25411273a39791cde838d5a8f51/cycler-0.11.0-py3-none-any.whl", hash = "sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3"}, +] +"decorator 5.1.1" = [ + {url = "https://files.pythonhosted.org/packages/66/0c/8d907af351aa16b42caae42f9d6aa37b900c67308052d10fdce809f8d952/decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, + {url = "https://files.pythonhosted.org/packages/d5/50/83c593b07763e1161326b3b8c6686f0f4b0f24d5526546bee538c89837d6/decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, +] +"exceptiongroup 1.1.1" = [ + {url = "https://files.pythonhosted.org/packages/61/97/17ed81b7a8d24d8f69b62c0db37abbd8c0042d4b3fc429c73dab986e7483/exceptiongroup-1.1.1-py3-none-any.whl", hash = "sha256:232c37c63e4f682982c8b6459f33a8981039e5fb8756b2074364e5055c498c9e"}, + {url = "https://files.pythonhosted.org/packages/cc/38/57f14ddc8e8baeddd8993a36fe57ce7b4ba174c35048b9a6d270bb01e833/exceptiongroup-1.1.1.tar.gz", hash = "sha256:d484c3090ba2889ae2928419117447a14daf3c1231d5e30d0aae34f354f01785"}, +] +"fonttools 4.39.3" = [ + {url = 
"https://files.pythonhosted.org/packages/16/07/1c7547e27f559ec078801d522cc4d5127cdd4ef8e831c8ddcd9584668a07/fonttools-4.39.3-py3-none-any.whl", hash = "sha256:64c0c05c337f826183637570ac5ab49ee220eec66cf50248e8df527edfa95aeb"}, + {url = "https://files.pythonhosted.org/packages/39/d7/ab05ae34dd57dd657e492d95ce7ec6bfebfb3bfcdc7316660ac5a13fcfee/fonttools-4.39.3.zip", hash = "sha256:9234b9f57b74e31b192c3fc32ef1a40750a8fbc1cd9837a7b7bfc4ca4a5c51d7"}, +] +"idna 3.4" = [ + {url = "https://files.pythonhosted.org/packages/8b/e1/43beb3d38dba6cb420cefa297822eac205a277ab43e5ba5d5c46faf96438/idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, + {url = "https://files.pythonhosted.org/packages/fc/34/3030de6f1370931b9dbb4dad48f6ab1015ab1d32447850b9fc94e60097be/idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, +] +"importlib-metadata 6.1.0" = [ + {url = "https://files.pythonhosted.org/packages/e2/d8/3d431bade4598ad9e33be9da41d15e6607b878008e922d122659ab01b077/importlib_metadata-6.1.0.tar.gz", hash = "sha256:43ce9281e097583d758c2c708c4376371261a02c34682491a8e98352365aad20"}, + {url = "https://files.pythonhosted.org/packages/f8/7d/e3adad613703c86d62aa991b45d6f090cf59975078a8c8100b50a0c86948/importlib_metadata-6.1.0-py3-none-any.whl", hash = "sha256:ff80f3b5394912eb1b108fcfd444dc78b7f1f3e16b16188054bd01cb9cb86f09"}, +] +"importlib-resources 5.12.0" = [ + {url = "https://files.pythonhosted.org/packages/38/71/c13ea695a4393639830bf96baea956538ba7a9d06fcce7cef10bfff20f72/importlib_resources-5.12.0-py3-none-any.whl", hash = "sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a"}, + {url = "https://files.pythonhosted.org/packages/4e/a2/3cab1de83f95dd15297c15bdc04d50902391d707247cada1f021bbfe2149/importlib_resources-5.12.0.tar.gz", hash = "sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6"}, +] +"iniconfig 2.0.0" = [ + {url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, + {url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, +] +"joblib 1.2.0" = [ + {url = "https://files.pythonhosted.org/packages/45/dd/a5435a6902d6315241c48a5343e6e6675b007e05d3738ed97a7a47864e53/joblib-1.2.0.tar.gz", hash = "sha256:e1cee4a79e4af22881164f218d4311f60074197fb707e082e803b61f6d137018"}, + {url = "https://files.pythonhosted.org/packages/91/d4/3b4c8e5a30604df4c7518c562d4bf0502f2fa29221459226e140cf846512/joblib-1.2.0-py3-none-any.whl", hash = "sha256:091138ed78f800342968c523bdde947e7a305b8594b910a0fea2ab83c3c6d385"}, +] +"kiwisolver 1.4.4" = [ + {url = "https://files.pythonhosted.org/packages/03/93/11790e8e81b89acd3a1c8a6b501f8a05b1c41beee0990582699cdda29557/kiwisolver-1.4.4-cp37-cp37m-win_amd64.whl", hash = "sha256:03baab2d6b4a54ddbb43bba1a3a2d1627e82d205c5cf8f4c924dc49284b87166"}, + {url = "https://files.pythonhosted.org/packages/08/14/2ee2b7013f6c7f92f76ee9e8368736410c24f404e70b0f882758493bdeec/kiwisolver-1.4.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02f79693ec433cb4b5f51694e8477ae83b3205768a6fb48ffba60549080e295b"}, + {url = 
"https://files.pythonhosted.org/packages/15/cd/5b0a188f94b82b46d43023dc47354c5bb21fa4539858cd06df80889451c5/kiwisolver-1.4.4-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:efda5fc8cc1c61e4f639b8067d118e742b812c930f708e6667a5ce0d13499e29"}, + {url = "https://files.pythonhosted.org/packages/1c/7b/ef27630b65a277b5499b60637146265e6d9f8c865877dcdf8beb7fa2f8e7/kiwisolver-1.4.4-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e307eb9bd99801f82789b44bb45e9f541961831c7311521b13a6c85afc09767"}, + {url = "https://files.pythonhosted.org/packages/26/32/6bd586b28b3736aa0997a24e5ebecebc83b2fdb86f29990875f7a2b83e4d/kiwisolver-1.4.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ad881edc7ccb9d65b0224f4e4d05a1e85cf62d73aab798943df6d48ab0cd79a1"}, + {url = "https://files.pythonhosted.org/packages/26/f3/1daa54509332dff966e1493fe0d5b573e0e11a56d301323ec6c667a53142/kiwisolver-1.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b533558eae785e33e8c148a8d9921692a9fe5aa516efbdff8606e7d87b9d5824"}, + {url = "https://files.pythonhosted.org/packages/2b/1f/a7cbeae4d9b0af9ffb8f139d4d4c8493716deb125cd23d8fabb6cd20783e/kiwisolver-1.4.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8c808594c88a025d4e322d5bb549282c93c8e1ba71b790f539567932722d7bd8"}, + {url = "https://files.pythonhosted.org/packages/2f/95/c60f54e280560f9707ba15730d9bcaea2214bf9ff852e057263b22e09dd9/kiwisolver-1.4.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1792d939ec70abe76f5054d3f36ed5656021dcad1322d1cc996d4e54165cef9"}, + {url = "https://files.pythonhosted.org/packages/42/09/973a15453e1dcfbcd205030e8ed6f32c3282ce4a9826ff8c309d4b357afe/kiwisolver-1.4.4-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:872b8ca05c40d309ed13eb2e582cab0c5a05e81e987ab9c521bf05ad1d5cf5cb"}, + {url = "https://files.pythonhosted.org/packages/43/67/634a9c3854e4f908ff5ffd48ea51b1ca3e096ce79ffdd91ebdcd07d6d64b/kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6295ecd49304dcf3bfbfa45d9a081c96509e95f4b9d0eb7ee4ec0530c4a96514"}, + {url = "https://files.pythonhosted.org/packages/48/36/b8605e1559c97522950658302fd7371affac055c554d45ba1c4665b29724/kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:db608a6757adabb32f1cfe6066e39b3706d8c3aa69bbc353a5b61edad36a5cb4"}, + {url = "https://files.pythonhosted.org/packages/49/b9/edd9b69e1f2a8339347bcfcfbb14ce19db4a81158d01d8fd26fc3a088109/kiwisolver-1.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:ba59c92039ec0a66103b1d5fe588fa546373587a7d68f5c96f743c3396afc04b"}, + {url = "https://files.pythonhosted.org/packages/4d/91/08eaa0f13fe644ae913cb157e9599ce64b64a99620df3beb0b142690e264/kiwisolver-1.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc8d3bd6c72b2dd9decf16ce70e20abcb3274ba01b4e1c96031e0c4067d1e7cd"}, + {url = "https://files.pythonhosted.org/packages/4f/05/59b34e788bf2b45c7157c3d898d567d28bc42986c1b6772fb1af329eea0d/kiwisolver-1.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:5bce61af018b0cb2055e0e72e7d65290d822d3feee430b7b8203d8a855e78766"}, + {url = "https://files.pythonhosted.org/packages/52/a4/93745b44a80fbddb8deec9c5f17193bfea1fce8d128c18b9489edc6e5917/kiwisolver-1.4.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b428ef021242344340460fa4c9185d0b1f66fbdbfecc6c63eff4b7c29fad429d"}, + {url = 
"https://files.pythonhosted.org/packages/5a/77/c8e94936cbafe39a7f454a365cf7fe5eec33bbd16c9301e10dea44ed24db/kiwisolver-1.4.4-cp38-cp38-win32.whl", hash = "sha256:75facbe9606748f43428fc91a43edb46c7ff68889b91fa31f53b58894503a191"}, + {url = "https://files.pythonhosted.org/packages/5f/5c/272a7dd49a1914f35cd8d6d9f386defa8b047f6fbd06badd6b77b3ba24e7/kiwisolver-1.4.4.tar.gz", hash = "sha256:d41997519fcba4a1e46eb4a2fe31bc12f0ff957b2b81bac28db24744f333e955"}, + {url = "https://files.pythonhosted.org/packages/63/33/a52b723c5e6f1a7b0d73d68761f05ba217519da3ec264ef32dbead9e68ec/kiwisolver-1.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:e7da3fec7408813a7cebc9e4ec55afed2d0fd65c4754bc376bf03498d4e92686"}, + {url = "https://files.pythonhosted.org/packages/64/e2/a8764238ea5595ab392b78cd23c2b07044133fddc8b3acdd60efbcc1af99/kiwisolver-1.4.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f4f270de01dd3e129a72efad823da90cc4d6aafb64c410c9033aba70db9f1ff0"}, + {url = "https://files.pythonhosted.org/packages/68/20/2ce1186ef4edf47281faf58f6dd72a1fcd2be1fc66514bd2d220097bdcd1/kiwisolver-1.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:3fe20f63c9ecee44560d0e7f116b3a747a5d7203376abeea292ab3152334d004"}, + {url = "https://files.pythonhosted.org/packages/69/6c/8597155d3755337c7e39a7aaf54a07de0ad2572b109d904aeb70b4ab6f36/kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:283dffbf061a4ec60391d51e6155e372a1f7a4f5b15d59c8505339454f8989e4"}, + {url = "https://files.pythonhosted.org/packages/6a/0f/7923176faa67482b028f242353e1939361cc90b089ebfff007503d008e7c/kiwisolver-1.4.4-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:787518a6789009c159453da4d6b683f468ef7a65bbde796bcea803ccf191058d"}, + {url = "https://files.pythonhosted.org/packages/70/85/2c6f6c2de0820c97d49ffa7e183ace21f02a683cd0d6fa98f58762e597f6/kiwisolver-1.4.4-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c2dbb44c3f7e6c4d3487b31037b1bdbf424d97687c1747ce4ff2895795c9bf69"}, + {url = "https://files.pythonhosted.org/packages/71/1c/f665848b07050dbc94297dd626f42144c045513f90e50ddc5b1716a8f261/kiwisolver-1.4.4-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:81e38381b782cc7e1e46c4e14cd997ee6040768101aefc8fa3c24a4cc58e98f8"}, + {url = "https://files.pythonhosted.org/packages/73/c9/7e5ea6d8dd9a3b91e957ada019a149f54ef294275f784451991de4d4d297/kiwisolver-1.4.4-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:91672bacaa030f92fc2f43b620d7b337fd9a5af28b0d6ed3f77afc43c4a64b5a"}, + {url = "https://files.pythonhosted.org/packages/75/a6/27a96c414200846c9a13ac16635c747b0d2bd8c391f22f31cc9e638e1849/kiwisolver-1.4.4-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ecb1fa0db7bf4cff9dac752abb19505a233c7f16684c5826d1f11ebd9472b871"}, + {url = "https://files.pythonhosted.org/packages/75/e2/37f78c092e2e11fe559b9cfca1172fca0c20ca9b521ec806be9679251f1d/kiwisolver-1.4.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d0611a0a2a518464c05ddd5a3a1a0e856ccc10e67079bb17f265ad19ab3c7597"}, + {url = "https://files.pythonhosted.org/packages/78/df/13ab40e58fa093243f9732cfe2880fc84cee6963f75a889789a682bc1c50/kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:877272cf6b4b7e94c9614f9b10140e198d2186363728ed0f701c6eee1baec1da"}, + {url = 
"https://files.pythonhosted.org/packages/79/0f/5cc4ca3df66c49817944b9a1c7343ba70aefffc868ddf651d7839cc5dffd/kiwisolver-1.4.4-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7577c1987baa3adc4b3c62c33bd1118c3ef5c8ddef36f0f2c950ae0b199e100d"}, + {url = "https://files.pythonhosted.org/packages/7f/6a/32e4f1b10b36a5d24ade09f01e1af6f9cf8fa5a868bf9f294b82302f1831/kiwisolver-1.4.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9f85003f5dfa867e86d53fac6f7e6f30c045673fa27b603c397753bebadc3008"}, + {url = "https://files.pythonhosted.org/packages/83/b7/f6386940bec20b842a097697a0396a0941cbc5262d4b619dee2cc123502b/kiwisolver-1.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2f5e60fabb7343a836360c4f0919b8cd0d6dbf08ad2ca6b9cf90bf0c76a3c4f6"}, + {url = "https://files.pythonhosted.org/packages/86/7a/6b438da7534dacd232ed4e19f74f4edced2cda9494d7e6536f54edfdf4a5/kiwisolver-1.4.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2e407cb4bd5a13984a6c2c0fe1845e4e41e96f183e5e5cd4d77a857d9693494c"}, + {url = "https://files.pythonhosted.org/packages/87/d8/c46267cbc3799a7609ace482f64a4e242fe95d67f562a05bc80c0e59d4a0/kiwisolver-1.4.4-cp37-cp37m-win32.whl", hash = "sha256:a553dadda40fef6bfa1456dc4be49b113aa92c2a9a9e8711e955618cd69622e3"}, + {url = "https://files.pythonhosted.org/packages/89/84/b63b6ada3b349605cf97e28b71bdf37dbf74207c5c56e0a03e583226edc0/kiwisolver-1.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d5b61785a9ce44e5a4b880272baa7cf6c8f48a5180c3e81c59553ba0cb0821ca"}, + {url = "https://files.pythonhosted.org/packages/8e/e0/6e6afbbdc7cac65e25e5407922f8e3997e53eda6bab70e8fa6f7765de60d/kiwisolver-1.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c97528e64cb9ebeff9701e7938653a9951922f2a38bd847787d4a8e498cc83ae"}, + {url = "https://files.pythonhosted.org/packages/92/be/d8b1ff785ef6ab899e6934e3e458580761beb561727ece19f83f96767de6/kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf7d9fce9bcc4752ca4a1b80aabd38f6d19009ea5cbda0e0856983cf6d0023f5"}, + {url = "https://files.pythonhosted.org/packages/93/96/57c94c63730407b7986606f4e58c1aaa5792323ea9a28cfffa9bd432257e/kiwisolver-1.4.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6cb459eea32a4e2cf18ba5fcece2dbdf496384413bc1bae15583f19e567f3b2"}, + {url = "https://files.pythonhosted.org/packages/96/61/79804e00f7e8b5c54f5fce84740896a18142b5e85152c44d565c0d763f05/kiwisolver-1.4.4-cp311-cp311-win32.whl", hash = "sha256:d06adcfa62a4431d404c31216f0f8ac97397d799cd53800e9d3efc2fbb3cf14e"}, + {url = "https://files.pythonhosted.org/packages/96/7f/63c0f0c775a9fc2b59684806d0a1da62f790adca4c0f5f3106059349f8a0/kiwisolver-1.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f9f39e2f049db33a908319cf46624a569b36983c7c78318e9726a4cb8923b26c"}, + {url = "https://files.pythonhosted.org/packages/a4/36/c414d75be311ce97ef7248edcc4fc05afae2998641bf6b592d43a9dee581/kiwisolver-1.4.4-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7c43e1e1206cd421cd92e6b3280d4385d41d7166b3ed577ac20444b6995a445f"}, + {url = "https://files.pythonhosted.org/packages/a5/d2/d55d40e3a65cc9cdf6b311862fba48905c1d59851594eaecc381727e0883/kiwisolver-1.4.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ea39b0ccc4f5d803e3337dd46bcce60b702be4d86fd0b3d7531ef10fd99a1ac"}, + {url = 
"https://files.pythonhosted.org/packages/a9/f1/ef22058926af3f3588370b6ef09193790c42790f517e91a6103cc5ec28f0/kiwisolver-1.4.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41dae968a94b1ef1897cb322b39360a0812661dba7c682aa45098eb8e193dbdf"}, + {url = "https://files.pythonhosted.org/packages/a9/fd/049c39c4c501775f5439ba8e08bf298d5af828b99c703e265c5150311ccd/kiwisolver-1.4.4-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:abbe9fa13da955feb8202e215c4018f4bb57469b1b78c7a4c5c7b93001699938"}, + {url = "https://files.pythonhosted.org/packages/ab/8f/8dbe2d4efc4c0b08ec67d6efb7cc31fbfd688c80afad85f65980633b0d37/kiwisolver-1.4.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1041feb4cda8708ce73bb4dcb9ce1ccf49d553bf87c3954bdfa46f0c3f77252c"}, + {url = "https://files.pythonhosted.org/packages/ac/e6/823a136cefcf0592338827f54cb73642c2ea580acd8a7d5dbf8f32437848/kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78d6601aed50c74e0ef02f4204da1816147a6d3fbdc8b3872d263338a9052c51"}, + {url = "https://files.pythonhosted.org/packages/ae/a1/5259f35063488465c433ddf70b000ba8eff024093849934b09d3bdc8fb2a/kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4bd472dbe5e136f96a4b18f295d159d7f26fd399136f5b17b08c4e5f498cd494"}, + {url = "https://files.pythonhosted.org/packages/b1/5c/245410d305b3648aee78171365333581e3ea93324f06b9e3834ed4464eb6/kiwisolver-1.4.4-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2a66fdfb34e05b705620dd567f5a03f239a088d5a3f321e7b6ac3239d22aa286"}, + {url = "https://files.pythonhosted.org/packages/b7/9d/b9d5c0412d46defef863f365b8ab8817b660e1f05385c0ed670deab0aa49/kiwisolver-1.4.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bc9db8a3efb3e403e4ecc6cd9489ea2bac94244f80c78e27c31dcc00d2790ac2"}, + {url = "https://files.pythonhosted.org/packages/b7/e0/ee57a00f6bf411e54cf0521eceab306d1c606c5a640ee1b54951d2bd41b7/kiwisolver-1.4.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a68b62a02953b9841730db7797422f983935aeefceb1679f0fc85cbfbd311c32"}, + {url = "https://files.pythonhosted.org/packages/b7/f4/12806263b2ef28704319b902c08e7aaf8c6931255a02d76b01c1cac66ab6/kiwisolver-1.4.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:968f44fdbf6dd757d12920d63b566eeb4d5b395fd2d00d29d7ef00a00582aac9"}, + {url = "https://files.pythonhosted.org/packages/bc/45/3022994d464bf1cf836cbca3fe94e0badc19e8b89baff82d412396cca19c/kiwisolver-1.4.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:db5283d90da4174865d520e7366801a93777201e91e79bacbac6e6927cbceede"}, + {url = "https://files.pythonhosted.org/packages/c2/58/a49e7fa3e09254102339d5c10ae10eaf76fd13b8a124accf518fbf2eb3fc/kiwisolver-1.4.4-cp39-cp39-win32.whl", hash = "sha256:da7e547706e69e45d95e116e6939488d62174e033b763ab1496b4c29b76fabea"}, + {url = "https://files.pythonhosted.org/packages/c5/05/e24f2cb424a34d78d65fe5dfd930419fd42d8fcf83e796ed187d42ac034c/kiwisolver-1.4.4-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da152d8cdcab0e56e4f45eb08b9aea6455845ec83172092f09b0e077ece2cf7a"}, + {url = "https://files.pythonhosted.org/packages/c5/52/3f96b6761fc70fb3e2fc8189fc1bc1ced3350321e6690189a1b3c6463bc8/kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:f0a1dbdb5ecbef0d34eb77e56fcb3e95bbd7e50835d9782a45df81cc46949750"}, + {url = 
"https://files.pythonhosted.org/packages/c7/cf/40d5c5d4f91b2d5cb3aadad9a1074964749a19e1054cef3d491cfa73a25e/kiwisolver-1.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8ad8285b01b0d4695102546b342b493b3ccc6781fc28c8c6a1bb63e95d22f09"}, + {url = "https://files.pythonhosted.org/packages/ca/0d/ddabc096cebfdcade8c77e42b7a30650fbd8024dfd2d01ab4036a7f0adfe/kiwisolver-1.4.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:841293b17ad704d70c578f1f0013c890e219952169ce8a24ebc063eecf775454"}, + {url = "https://files.pythonhosted.org/packages/cb/16/cd9684584d1f2ea5df14c483e6d9a121886817da3dc8740ccc7e6a170541/kiwisolver-1.4.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d1573129aa0fd901076e2bfb4275a35f5b7aa60fbfb984499d661ec950320b0"}, + {url = "https://files.pythonhosted.org/packages/cb/56/a7c407d437f82eb92954b618bbc71af28a3d88634901f69500d186209a85/kiwisolver-1.4.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ed58b8acf29798b036d347791141767ccf65eee7f26bde03a71c944449e53de"}, + {url = "https://files.pythonhosted.org/packages/cb/ab/a94286c03f19851cfabeba28dde406ba29ca048d77d3342f7699268af649/kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:5853eb494c71e267912275e5586fe281444eb5e722de4e131cddf9d442615626"}, + {url = "https://files.pythonhosted.org/packages/cf/93/52792dd319af16897c248e5e053a806ef83648212e66ec3738aaf7238094/kiwisolver-1.4.4-cp310-cp310-win32.whl", hash = "sha256:e92a513161077b53447160b9bd8f522edfbed4bd9759e4c18ab05d7ef7e49408"}, + {url = "https://files.pythonhosted.org/packages/e1/f7/9ffb2a9e67a38707bab30f8591adaa8f137abe91a83d667400c81bb024bc/kiwisolver-1.4.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:62ac9cc684da4cf1778d07a89bf5f81b35834cb96ca523d3a7fb32509380cbf6"}, + {url = "https://files.pythonhosted.org/packages/e3/9a/697ff811a50e294eaf9df24d5ea03616d9d44fdd114ed18d92be95687fd4/kiwisolver-1.4.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:28bc5b299f48150b5f822ce68624e445040595a4ac3d59251703779836eceff9"}, + {url = "https://files.pythonhosted.org/packages/eb/db/b7ebaa2d35f9fb55f3ff8328b5e9dc049f6524dca737cea13e6235ab191d/kiwisolver-1.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:10ee06759482c78bdb864f4109886dff7b8a56529bc1609d4f1112b93fe6423c"}, + {url = "https://files.pythonhosted.org/packages/ed/bf/7994af5c838c761b4998044dfabecce8c9f428479e32fe77edc7336dcfd2/kiwisolver-1.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e0ea21f66820452a3f5d1655f8704a60d66ba1191359b96541eaf457710a5fc6"}, + {url = "https://files.pythonhosted.org/packages/ee/c2/99b2d61dc246844498e68571c589e37ed7a866a4914cb2da2d66d141b596/kiwisolver-1.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c79ebe8f3676a4c6630fd3f777f3cfecf9289666c84e775a67d1d358578dc2e3"}, + {url = "https://files.pythonhosted.org/packages/f2/e2/7ed98290955aa83598d0e5672d88bbc193192cdcd23d3a9ed7e536cf8e55/kiwisolver-1.4.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f0a71d85ecdd570ded8ac3d1c0f480842f49a40beb423bb8014539a9f32a5897"}, + {url = "https://files.pythonhosted.org/packages/f6/e8/194a4b4eee0990a648711bfb769a7110d10fd8d8b370a0464cb3d1060381/kiwisolver-1.4.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:70e7c2e7b750585569564e2e5ca9845acfaa5da56ac46df68414f29fea97be9f"}, + {url = "https://files.pythonhosted.org/packages/f8/f4/724d454e95c7e9be6a05f1da3fb85251b5dbb8c3b7d06bdc61d56b16035a/kiwisolver-1.4.4-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:36dafec3d6d6088d34e2de6b85f9d8e2324eb734162fba59d2ba9ed7a2043d5b"}, +] +"lazy-loader 0.2" = [ + {url = "https://files.pythonhosted.org/packages/0e/c9/acfefeec32fe7090430f515c2bd376da650414a3df0b466030dd56512def/lazy_loader-0.2.tar.gz", hash = "sha256:0edc7a5175c400acb108f283749951fefdadedeb00adcec6e88b974a9254f18a"}, + {url = "https://files.pythonhosted.org/packages/a1/a8/c41f46b47a381bd60a40c0ef00d2fd1722b743b178f9c1cec0da949043de/lazy_loader-0.2-py3-none-any.whl", hash = "sha256:c35875f815c340f823ce3271ed645045397213f961b40ad0c0d395c3f5218eeb"}, +] +"librosa 0.10.0.post2" = [ + {url = "https://files.pythonhosted.org/packages/10/1e/0f7d6662b6f35dc6ce1429902b38784eba72cee3beedd19af621ba1ca2dc/librosa-0.10.0.post2.tar.gz", hash = "sha256:6623673da30773beaae962cb4685f188155582f25bc60fc52da968f59eea8567"}, + {url = "https://files.pythonhosted.org/packages/5c/26/e1127810de8b60a58bfa682f858fd7ba36667d29c0b9ad3b6ff10d6cb944/librosa-0.10.0.post2-py3-none-any.whl", hash = "sha256:0f3b56118cb01ea89df4b04e924c7f48c5c13d42cc55a12540eb04ae87ab5848"}, +] +"llvmlite 0.39.1" = [ + {url = "https://files.pythonhosted.org/packages/07/31/a5f5f578a2b19938e1bb91dcd79bd436557baf582dde23845cb0e76a2241/llvmlite-0.39.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6546bed4e02a1c3d53a22a0bced254b3b6894693318b16c16c8e43e29d6befb6"}, + {url = "https://files.pythonhosted.org/packages/09/1f/629a04882c9b3dea4848337b80c9b550d8c713dce87528f7f8c3ab3b277f/llvmlite-0.39.1-cp310-cp310-win32.whl", hash = "sha256:d0bfd18c324549c0fec2c5dc610fd024689de6f27c6cc67e4e24a07541d6e49b"}, + {url = "https://files.pythonhosted.org/packages/11/61/d2dc91f46c588cbf5b8e57f193d79dbd0d2352e9271808837f87281bd240/llvmlite-0.39.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39dc2160aed36e989610fc403487f11b8764b6650017ff367e45384dff88ffbf"}, + {url = "https://files.pythonhosted.org/packages/14/27/1468111538f33bd9fb13c0b2c1534c7a487cec8fadf14e318d73be18e4e1/llvmlite-0.39.1.tar.gz", hash = "sha256:b43abd7c82e805261c425d50335be9a6c4f84264e34d6d6e475207300005d572"}, + {url = "https://files.pythonhosted.org/packages/31/4e/412c9f557ac007e5cac75c3593fe159db7f44d8caea4122b9251216737e1/llvmlite-0.39.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1578f5000fdce513712e99543c50e93758a954297575610f48cb1fd71b27c08a"}, + {url = "https://files.pythonhosted.org/packages/32/ba/545b37dd07a44889f6591c7994290b7a57cdcaaf0c795516b5593d65c424/llvmlite-0.39.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c0f158e4708dda6367d21cf15afc58de4ebce979c7a1aa2f6b977aae737e2a54"}, + {url = "https://files.pythonhosted.org/packages/36/83/b5125da6f8f59c9ed77e259444e30c67da3f287938a20a61ccaa8e0591e9/llvmlite-0.39.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3f331a323d0f0ada6b10d60182ef06c20a2f01be21699999d204c5750ffd0b4"}, + {url = "https://files.pythonhosted.org/packages/3e/59/a2f5a20268cd261fb81ce2134267fe6f947bbff9401dbc4f12cfeb089fa6/llvmlite-0.39.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fa9b26939ae553bf30a9f5c4c754db0fb2d2677327f2511e674aa2f5df941789"}, + {url = "https://files.pythonhosted.org/packages/45/b5/5013d162337c07b11c946b2842bac146434a5c33f6e6e5011e6cf164bf83/llvmlite-0.39.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:62c0ea22e0b9dffb020601bb65cb11dd967a095a488be73f07d8867f4e327ca5"}, + {url = 
"https://files.pythonhosted.org/packages/5a/ff/b655fa1764c0c764752496676ed0076631880535e2b67c34c776b1f9a9bc/llvmlite-0.39.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ffc84ade195abd4abcf0bd3b827b9140ae9ef90999429b9ea84d5df69c9058c"}, + {url = "https://files.pythonhosted.org/packages/67/d6/7623e80301e9f65b9ce3ac2656e29fab3c91a575e589750ee2a1b03decac/llvmlite-0.39.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e4f212c018db951da3e1dc25c2651abc688221934739721f2dad5ff1dd5f90e7"}, + {url = "https://files.pythonhosted.org/packages/6f/78/15e11f84531c3e4e078ed2faa4e6e078ef2a0c06c6275020bc10c3865e9c/llvmlite-0.39.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50aea09a2b933dab7c9df92361b1844ad3145bfb8dd2deb9cd8b8917d59306fb"}, + {url = "https://files.pythonhosted.org/packages/75/7f/9055977016e713a5c033c376a9ea9cb3d1092a02ee1421c41ccbcc5aa043/llvmlite-0.39.1-cp38-cp38-win_amd64.whl", hash = "sha256:fb62fc7016b592435d3e3a8f680e3ea8897c3c9e62e6e6cc58011e7a4801439e"}, + {url = "https://files.pythonhosted.org/packages/82/7d/479ea497d2b453064c28e531339c070d70a4a92b8674dd587ab4e126d618/llvmlite-0.39.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2c00ff204afa721b0bb9835b5bf1ba7fba210eefcec5552a9e05a63219ba0dc"}, + {url = "https://files.pythonhosted.org/packages/8c/dd/dbf1d9c68d91ee3e14bf962d195e18f877a26fdb6556dcfea8951e47e4e7/llvmlite-0.39.1-cp38-cp38-win32.whl", hash = "sha256:4c6ebace910410daf0bebda09c1859504fc2f33d122e9a971c4c349c89cca630"}, + {url = "https://files.pythonhosted.org/packages/90/2c/fd3adee418409d8be7f4de0e183ed5fb0d18359f5a2367906e659b03b669/llvmlite-0.39.1-cp310-cp310-win_amd64.whl", hash = "sha256:7ebf1eb9badc2a397d4f6a6c8717447c81ac011db00064a00408bc83c923c0e4"}, + {url = "https://files.pythonhosted.org/packages/91/38/6b50b0a8ef737083dc2f4a359d42282b2fb104dae97674dcfc85a48c693b/llvmlite-0.39.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16f56eb1eec3cda3a5c526bc3f63594fc24e0c8d219375afeb336f289764c6c7"}, + {url = "https://files.pythonhosted.org/packages/94/ae/a24ff97a39a8b4c60b93c63ccde867249c1d5dab03d790e85d64f99c0db3/llvmlite-0.39.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60f8dd1e76f47b3dbdee4b38d9189f3e020d22a173c00f930b52131001d801f9"}, + {url = "https://files.pythonhosted.org/packages/be/05/af5fd325c52b73255a51691300f00b06177dfe1088b90ea20dd49c6e0c23/llvmlite-0.39.1-cp37-cp37m-win_amd64.whl", hash = "sha256:e172c73fccf7d6db4bd6f7de963dedded900d1a5c6778733241d878ba613980e"}, + {url = "https://files.pythonhosted.org/packages/c8/38/a65240eacb5a0e092b13e09a9466f8759654825a850d1b4cb28a6f932722/llvmlite-0.39.1-cp39-cp39-win_amd64.whl", hash = "sha256:3fc14e757bc07a919221f0cbaacb512704ce5774d7fcada793f1996d6bc75f2a"}, + {url = "https://files.pythonhosted.org/packages/ca/2f/408d0c5583a5ebb3ca79c13c47b06c77084558c51d1fab4d015d14671c00/llvmlite-0.39.1-cp39-cp39-win32.whl", hash = "sha256:03aee0ccd81735696474dc4f8b6be60774892a2929d6c05d093d17392c237f32"}, + {url = "https://files.pythonhosted.org/packages/d4/a6/42d47c9bbc67ded89504fb91c3b900eca7b89019a53bb5404da1bd56e2d5/llvmlite-0.39.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1ec3d70b3e507515936e475d9811305f52d049281eaa6c8273448a61c9b5b7e2"}, + {url = "https://files.pythonhosted.org/packages/d8/fb/7bb4a1fa11420dd01fdd8c370a5f2c812a70f1f8acf5a45c2b4c2c9d6338/llvmlite-0.39.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:6717c7a6e93c9d2c3d07c07113ec80ae24af45cde536b34363d4bcd9188091d9"}, + {url = "https://files.pythonhosted.org/packages/d9/86/685e82d14156becc7d260a80b46af3a079004f51657e4a2957cf872445cb/llvmlite-0.39.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ddab526c5a2c4ccb8c9ec4821fcea7606933dc53f510e2a6eebb45a418d3488a"}, + {url = "https://files.pythonhosted.org/packages/df/e3/2e07af7ad70187756f28e480dec5e34701e44012f587fa9e5d0b61003a6d/llvmlite-0.39.1-cp37-cp37m-win32.whl", hash = "sha256:b1a0bbdb274fb683f993198775b957d29a6f07b45d184c571ef2a721ce4388cf"}, + {url = "https://files.pythonhosted.org/packages/e3/0b/68d4a88f76a6e11cc8701c48f63240ee1ce3be7463554212b4adaae1d2a8/llvmlite-0.39.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3803f11ad5f6f6c3d2b545a303d68d9fabb1d50e06a8d6418e6fcd2d0df00959"}, + {url = "https://files.pythonhosted.org/packages/eb/48/497215f37f36562ad2c803f73f7878776e063c018b3fa935ba32cfc0bc40/llvmlite-0.39.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e31f4b799d530255aaf0566e3da2df5bfc35d3cd9d6d5a3dcc251663656c27b1"}, + {url = "https://files.pythonhosted.org/packages/f5/d4/4cd3efa0a6e5f63b17ba7e383bb9e90f8008c539bb51dbe5bb7b7aabaed1/llvmlite-0.39.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22d36591cd5d02038912321d9ab8e4668e53ae2211da5523f454e992b5e13c36"}, +] +"matplotlib 3.7.1" = [ + {url = "https://files.pythonhosted.org/packages/05/92/8a7449693adc4480a4777b407b44d21c0c6e3d2ace3250091fe1a89bc825/matplotlib-3.7.1-cp39-cp39-win_amd64.whl", hash = "sha256:3ba2af245e36990facf67fde840a760128ddd71210b2ab6406e640188d69d136"}, + {url = "https://files.pythonhosted.org/packages/07/67/0d84ca088fa164ac9ad9bf1a896517ee9eeb98a27a315e1bcad619cde30c/matplotlib-3.7.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3032884084f541163f295db8a6536e0abb0db464008fadca6c98aaf84ccf4717"}, + {url = "https://files.pythonhosted.org/packages/07/76/fde990f131450f08eb06e50814b98d347b14d7916c0ec31cba0a65a9be2b/matplotlib-3.7.1-cp310-cp310-win_amd64.whl", hash = "sha256:3d7bc90727351fb841e4d8ae620d2d86d8ed92b50473cd2b42ce9186104ecbba"}, + {url = "https://files.pythonhosted.org/packages/0e/61/255b0ab4fd319bb8274bde67eeb8b56e52c4d1b66123a6ed3de2b835d108/matplotlib-3.7.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:f67bfdb83a8232cb7a92b869f9355d677bce24485c460b19d01970b64b2ed476"}, + {url = "https://files.pythonhosted.org/packages/10/94/36527e47d0719e7ae3649cc290a4d0b5faeac3453867cdf633f4b2480d87/matplotlib-3.7.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:438196cdf5dc8d39b50a45cb6e3f6274edbcf2254f85fa9b895bf85851c3a613"}, + {url = "https://files.pythonhosted.org/packages/13/0d/a3c01d8dd48957029f5ea5eac3d778fdedefaef43533597def65e29e5414/matplotlib-3.7.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e99bc9e65901bb9a7ce5e7bb24af03675cbd7c70b30ac670aa263240635999a4"}, + {url = "https://files.pythonhosted.org/packages/16/23/d81f74e722eb064e726e7b6da999fd9e50de13b28e6496d67e9f09b6fe19/matplotlib-3.7.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:cf0e4f727534b7b1457898c4f4ae838af1ef87c359b76dcd5330fa31893a3ac7"}, + {url = "https://files.pythonhosted.org/packages/18/70/31f7b317e5575b590ed7227a9c3d6f42f8e8838ce7d5e763ff16440ac1d4/matplotlib-3.7.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8704726d33e9aa8a6d5215044b8d00804561971163563e6e6591f9dcf64340cc"}, + {url = 
"https://files.pythonhosted.org/packages/1d/24/72b0b7069d268b22c40f42d973f4b4971debd0f9ddc0fbf4753d5f0a2469/matplotlib-3.7.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:544764ba51900da4639c0f983b323d288f94f65f4024dc40ecb1542d74dc0500"}, + {url = "https://files.pythonhosted.org/packages/1e/5c/bae8d15f7dec470ee0269d503132678e5ce4abd1306b70b180d66ede13d5/matplotlib-3.7.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:83111e6388dec67822e2534e13b243cc644c7494a4bb60584edbff91585a83c6"}, + {url = "https://files.pythonhosted.org/packages/23/7c/3e9366cf2259785de934d0bb5a7e03828e23cd722439d1c78abc4b7d89eb/matplotlib-3.7.1-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:770a205966d641627fd5cf9d3cb4b6280a716522cd36b8b284a8eb1581310f61"}, + {url = "https://files.pythonhosted.org/packages/34/b3/c81bbb3de820e0eaba4d7d41b9df34ffdc3336159de5bb6c959cd2028670/matplotlib-3.7.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b867e2f952ed592237a1828f027d332d8ee219ad722345b79a001f49df0936eb"}, + {url = "https://files.pythonhosted.org/packages/35/a8/eb84f46e133fc0be5d50c3e1bec0aaa18a58bb53fc9ea96797289ffb2cd2/matplotlib-3.7.1-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a2cb34336110e0ed8bb4f650e817eed61fa064acbefeb3591f1b33e3a84fd96"}, + {url = "https://files.pythonhosted.org/packages/3a/4e/83499803b641c40e33c118b461a7c110bfa8cc6b3be10a2dc174232522dd/matplotlib-3.7.1-cp311-cp311-win32.whl", hash = "sha256:fbdeeb58c0cf0595efe89c05c224e0a502d1aa6a8696e68a73c3efc6bc354304"}, + {url = "https://files.pythonhosted.org/packages/4d/71/a0d28c6123773075f056ff2ce7b2a16a19a4ff2982f0de0f285c9866c420/matplotlib-3.7.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8bf26ade3ff0f27668989d98c8435ce9327d24cffb7f07d24ef609e33d582439"}, + {url = "https://files.pythonhosted.org/packages/51/3e/2e266434cf7aa82ab5d860b774a1ece49debffa3f5c32f7c6e305f0f5728/matplotlib-3.7.1-cp38-cp38-win32.whl", hash = "sha256:7c9a4b2da6fac77bcc41b1ea95fadb314e92508bf5493ceff058e727e7ecf5b0"}, + {url = "https://files.pythonhosted.org/packages/51/d8/ba69105b4b72aace5d3501af1b0886b248fa5363519df04dc17577578bab/matplotlib-3.7.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a867bf73a7eb808ef2afbca03bcdb785dae09595fbe550e1bab0cd023eba3de0"}, + {url = "https://files.pythonhosted.org/packages/5c/b0/050bcf86c57255066915eb805d36409e2e093a07d4615249b07aa2530ef5/matplotlib-3.7.1-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:6eb88d87cb2c49af00d3bbc33a003f89fd9f78d318848da029383bfc08ecfbfb"}, + {url = "https://files.pythonhosted.org/packages/5d/22/f55638bea4af17edf23e1c919ad5d256141bbeec0196c450be9785f1dcb6/matplotlib-3.7.1-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4cf327e98ecf08fcbb82685acaf1939d3338548620ab8dfa02828706402c34de"}, + {url = "https://files.pythonhosted.org/packages/5d/7e/0647f19705d819d2249df96625d83ff5de2e913a247610b753c504b7bfd0/matplotlib-3.7.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2bf092f9210e105f414a043b92af583c98f50050559616930d884387d0772aba"}, + {url = "https://files.pythonhosted.org/packages/62/6d/3817522ca223796703b68ffd38577582f2dc7a0c0dd410d1803e36b5e1db/matplotlib-3.7.1-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:95cbc13c1fc6844ab8812a525bbc237fa1470863ff3dace7352e910519e194b1"}, + {url = 
"https://files.pythonhosted.org/packages/6c/70/5f8b981680fc0bdb85f0dd00174e41f98d4cdb641921295822c8e14272fe/matplotlib-3.7.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:28506a03bd7f3fe59cd3cd4ceb2a8d8a2b1db41afede01f66c42561b9be7b4b7"}, + {url = "https://files.pythonhosted.org/packages/80/c0/1d29eafc057e516ffc1ba07da2054926f219c44ad4ea5df57ff97825182c/matplotlib-3.7.1-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:81a6b377ea444336538638d31fdb39af6be1a043ca5e343fe18d0f17e098770b"}, + {url = "https://files.pythonhosted.org/packages/81/f7/1c9145e24195723da3cb668637b98b6016be4692b335ba543058a7297c9e/matplotlib-3.7.1-cp39-cp39-win32.whl", hash = "sha256:4f99e1b234c30c1e9714610eb0c6d2f11809c9c78c984a613ae539ea2ad2eb4b"}, + {url = "https://files.pythonhosted.org/packages/83/3e/08d551274d660cd38af04b14366725c195f18ad0bd359e192ecf3ec2f2bb/matplotlib-3.7.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:97cc368a7268141afb5690760921765ed34867ffb9655dd325ed207af85c7529"}, + {url = "https://files.pythonhosted.org/packages/86/2b/a04f22015a03025a8c9c0363c4ecfd89eb45fc3af545ff838e02ac58b39d/matplotlib-3.7.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:08308bae9e91aca1ec6fd6dda66237eef9f6294ddb17f0d0b3c863169bf82353"}, + {url = "https://files.pythonhosted.org/packages/89/f3/84a9a6613ab0d89931d785f13fa2606e03f07252875acc8ebf5b676fa3c5/matplotlib-3.7.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb7d248c34a341cd4c31a06fd34d64306624c8cd8d0def7abb08792a5abfd556"}, + {url = "https://files.pythonhosted.org/packages/8a/d3/35c62c9f64ddef5f25763580a10cb1ff4a19dc1a2bf940ad06dbb10b248d/matplotlib-3.7.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56d94989191de3fcc4e002f93f7f1be5da476385dde410ddafbb70686acf00ea"}, + {url = "https://files.pythonhosted.org/packages/91/ae/410dca50b2b0b4d48bfaa41a20d7344078ac63a7178d3b5716b1014c90b9/matplotlib-3.7.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:21e9cff1a58d42e74d01153360de92b326708fb205250150018a52c70f43c290"}, + {url = "https://files.pythonhosted.org/packages/92/01/2c04d328db6955d77f8f60c17068dde8aa66f153b2c599ca03c2cb0d5567/matplotlib-3.7.1-cp38-cp38-win_amd64.whl", hash = "sha256:14645aad967684e92fc349493fa10c08a6da514b3d03a5931a1bac26e6792bd1"}, + {url = "https://files.pythonhosted.org/packages/9f/77/0cd22f92f7103383cb1ce3b3efc77411b9cc3a495242c8f2a623b498f586/matplotlib-3.7.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f883a22a56a84dba3b588696a2b8a1ab0d2c3d41be53264115c71b0a942d8fdb"}, + {url = "https://files.pythonhosted.org/packages/a8/14/83b722ae5bec25cd1b44067d2165952aa0943af287ea06f2e1e594220805/matplotlib-3.7.1-cp310-cp310-win32.whl", hash = "sha256:ce463ce590f3825b52e9fe5c19a3c6a69fd7675a39d589e8b5fbe772272b3a24"}, + {url = "https://files.pythonhosted.org/packages/b7/65/d6e00376dbdb6c227d79a2d6ec32f66cfb163f0cd924090e3133a4f85a11/matplotlib-3.7.1.tar.gz", hash = "sha256:7b73305f25eab4541bd7ee0b96d87e53ae9c9f1823be5659b806cd85786fe882"}, + {url = "https://files.pythonhosted.org/packages/b7/ed/32d00261ac6067b13af9181b2450d30599875ab61807defa85c05a28432a/matplotlib-3.7.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:57bfb8c8ea253be947ccb2bc2d1bb3862c2bccc662ad1b4626e1f5e004557042"}, + {url = "https://files.pythonhosted.org/packages/bc/22/52dd9b0f8da380309ceb4c5e6a9018417b56ad3b56bfd18fe0e1d1310541/matplotlib-3.7.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:617f14ae9d53292ece33f45cba8503494ee199a75b44de7717964f70637a36aa"}, + {url = "https://files.pythonhosted.org/packages/bd/5d/a9083be15f9bed89c1c5897473eae6dd1bab4acbcfb82fdae417149674d0/matplotlib-3.7.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:def58098f96a05f90af7e92fd127d21a287068202aa43b2a93476170ebd99e87"}, + {url = "https://files.pythonhosted.org/packages/cf/2c/41b330eeb47806abc19c1a4ab22821cb5a2be2cabe34c37f0d8483ae0e26/matplotlib-3.7.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75d4725d70b7c03e082bbb8a34639ede17f333d7247f56caceb3801cb6ff703d"}, + {url = "https://files.pythonhosted.org/packages/e0/2e/a9fc4c317bc8cc679d344dd881b97f67580b38e6eedc645c3623d6c5d139/matplotlib-3.7.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89768d84187f31717349c6bfadc0e0d8c321e8eb34522acec8a67b1236a66332"}, + {url = "https://files.pythonhosted.org/packages/e8/5a/2661b38ebd4b1d58f335be7e8150af0a7eb94d13bf7a6563e7c49ed40c4d/matplotlib-3.7.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8c587963b85ce41e0a8af53b9b2de8dddbf5ece4c34553f7bd9d066148dc719c"}, + {url = "https://files.pythonhosted.org/packages/f3/99/7010ae81984908cc655b7d24145aeed2784614957ed7f0cb5a72b17a63d3/matplotlib-3.7.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:46a561d23b91f30bccfd25429c3c706afe7d73a5cc64ef2dfaf2b2ac47c1a5dc"}, + {url = "https://files.pythonhosted.org/packages/fb/8c/391e3c105edb7e193bb163ed48988135228d0b5ce3143e1cbec2350e23c8/matplotlib-3.7.1-cp311-cp311-win_amd64.whl", hash = "sha256:c0bd19c72ae53e6ab979f0ac6a3fafceb02d2ecafa023c5cca47acd934d10be7"}, +] +"msgpack 1.0.5" = [ + {url = "https://files.pythonhosted.org/packages/0a/04/bc319ba061f6dc9077745988be288705b3f9f18c5a209772a3e8fcd419fd/msgpack-1.0.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9985b214f33311df47e274eb788a5893a761d025e2b92c723ba4c63936b69b1"}, + {url = "https://files.pythonhosted.org/packages/0d/90/44edef4a8c6f035b054c4b017c5adcb22a35ec377e17e50dd5dced279a6b/msgpack-1.0.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:916723458c25dfb77ff07f4c66aed34e47503b2eb3188b3adbec8d8aa6e00f48"}, + {url = "https://files.pythonhosted.org/packages/0e/69/3d10e741dd2bbb806af5cdc76551735baab5f5f9773701eb05502c913a6e/msgpack-1.0.5-cp311-cp311-win32.whl", hash = "sha256:c396e2cc213d12ce017b686e0f53497f94f8ba2b24799c25d913d46c08ec422c"}, + {url = "https://files.pythonhosted.org/packages/10/ca/50c3a5e92d459a942169747315afd8c226d05427eccff903ddf33135c574/msgpack-1.0.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20a97bf595a232c3ee6d57ddaadd5453d174a52594bf9c21d10407e2a2d9b3bd"}, + {url = "https://files.pythonhosted.org/packages/10/fe/9e004c4deb457f1ef1ad88c1188da5691ff1855e0d03a5ac3635ae1f6530/msgpack-1.0.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:55b56a24893105dc52c1253649b60f475f36b3aa0fc66115bffafb624d7cb30b"}, + {url = "https://files.pythonhosted.org/packages/12/6e/0cfd1dc07f61a6ac606587a393f489c3ca463469d285a73c8e5e2f61b021/msgpack-1.0.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:20c784e66b613c7f16f632e7b5e8a1651aa5702463d61394671ba07b2fc9e025"}, + {url = "https://files.pythonhosted.org/packages/17/10/be97811782473d709d07b65a3955a5a76d47686aff3d62bb41d48aea7c92/msgpack-1.0.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:362d9655cd369b08fda06b6657a303eb7172d5279997abe094512e919cf74b11"}, + {url = 
"https://files.pythonhosted.org/packages/18/3f/3860151fbdf50e369bbe4ffd307a588417669c725025e383f3ce5893690f/msgpack-1.0.5-cp39-cp39-win32.whl", hash = "sha256:ac9dd47af78cae935901a9a500104e2dea2e253207c924cc95de149606dc43cc"}, + {url = "https://files.pythonhosted.org/packages/19/0c/2c3b443df88f5d400f2e19a3d867564d004b26e137f18c2f2663913987bc/msgpack-1.0.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:476a8fe8fae289fdf273d6d2a6cb6e35b5a58541693e8f9f019bfe990a51e4ba"}, + {url = "https://files.pythonhosted.org/packages/1a/f7/df5814697c25bdebb14ea97d27ddca04f5d4c6e249f096d086fea521c139/msgpack-1.0.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b72d0698f86e8d9ddf9442bdedec15b71df3598199ba33322d9711a19f08145c"}, + {url = "https://files.pythonhosted.org/packages/27/ad/4edfe383ec3185611441179ffee8cbc8155d7575fbad73f6d31015e35451/msgpack-1.0.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9f5ae84c5c8a857ec44dc180a8b0cc08238e021f57abdf51a8182e915e6299f0"}, + {url = "https://files.pythonhosted.org/packages/28/8f/c58c53c884217cc572c19349c7e1129b5a6eae36df0a017aae3a8f3d7aa8/msgpack-1.0.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:addab7e2e1fcc04bd08e4eb631c2a90960c340e40dfc4a5e24d2ff0d5a3b3edb"}, + {url = "https://files.pythonhosted.org/packages/29/56/1fb6b96aab759ab3bc05b03ba6d936b350db72aac203cde56ea6bd001237/msgpack-1.0.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d25dd59bbbbb996eacf7be6b4ad082ed7eacc4e8f3d2df1ba43822da9bfa122a"}, + {url = "https://files.pythonhosted.org/packages/2b/c4/f2c8695ae69d1425eddc5e2f849c525b562dc8409bc2979e525f3dd4fecd/msgpack-1.0.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56a62ec00b636583e5cb6ad313bbed36bb7ead5fa3a3e38938503142c72cba4f"}, + {url = "https://files.pythonhosted.org/packages/2b/d4/9165cf113f9b86ce55e36f36bc6cd9e0c5601b0ade02741b2ead8b5dc254/msgpack-1.0.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a2b031c2e9b9af485d5e3c4520f4220d74f4d222a5b8dc8c1a3ab9448ca79c57"}, + {url = "https://files.pythonhosted.org/packages/2c/e9/c79ecc36cfa34d850a01773565e0fccafd69efff07172028c3a5f758b83f/msgpack-1.0.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b2de4c1c0538dcb7010902a2b97f4e00fc4ddf2c8cda9749af0e594d3b7fa3d7"}, + {url = "https://files.pythonhosted.org/packages/2f/21/e488871f8e498efe14821b0c870eb95af52cfafb9b8dd41d83fad85b383b/msgpack-1.0.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48296af57cdb1d885843afd73c4656be5c76c0c6328db3440c9601a98f303d87"}, + {url = "https://files.pythonhosted.org/packages/33/0a/aa7b53ae17cf1dc1c352d705ab3162fc572c55048cc3177c1a88009c47fd/msgpack-1.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ab2f3331cb1b54165976a9d976cb251a83183631c88076613c6c780f0d6e45a"}, + {url = "https://files.pythonhosted.org/packages/33/52/099f0dde1283bac7bf267ab941dfa3b7c89ee701e4252973f8d3c10e68d6/msgpack-1.0.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:379026812e49258016dd84ad79ac8446922234d498058ae1d415f04b522d5b2d"}, + {url = "https://files.pythonhosted.org/packages/34/3c/34e94b091b3fdf941dbce5bc619e2fa5488d49fdf00944b50f5a1d6e1871/msgpack-1.0.5-cp36-cp36m-win32.whl", hash = "sha256:b5ef2f015b95f912c2fcab19c36814963b5463f1fb9049846994b007962743e9"}, + {url = "https://files.pythonhosted.org/packages/3c/e5/3d436bed11849ba05d777ed3fd1a0440170bad460335ea541dd6946047ed/msgpack-1.0.5-cp310-cp310-win_amd64.whl", hash = 
"sha256:4867aa2df9e2a5fa5f76d7d5565d25ec76e84c106b55509e78c1ede0f152659a"}, + {url = "https://files.pythonhosted.org/packages/3e/80/bc7fdb75a35bf32c7c529c247dcadfd0502aac2309e207a89b0be6fe42ea/msgpack-1.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cb47c21a8a65b165ce29f2bec852790cbc04936f502966768e4aae9fa763cb7"}, + {url = "https://files.pythonhosted.org/packages/43/87/6507d56f62b958d822ae4ffe1c4507ed7d3cf37ad61114665816adcf4adc/msgpack-1.0.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a740fa0e4087a734455f0fc3abf5e746004c9da72fbd541e9b113013c8dc3282"}, + {url = "https://files.pythonhosted.org/packages/45/85/6b55b0cabad846d3e730226a897f878f8f63ee505668bb6c55a697b0bfb0/msgpack-1.0.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bf22a83f973b50f9d38e55c6aade04c41ddda19b00c4ebc558930d78eecc64ed"}, + {url = "https://files.pythonhosted.org/packages/45/e1/6408389bd2cf0c339ea317926beb64d100f60bc8d236ac59f1c1162be2e4/msgpack-1.0.5-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1d46dfe3832660f53b13b925d4e0fa1432b00f5f7210eb3ad3bb9a13c6204a6"}, + {url = "https://files.pythonhosted.org/packages/49/57/a28120d82f8e77622a1e1efc652389c71145f6b89b47b39814a7c6038373/msgpack-1.0.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57e1f3528bd95cc44684beda696f74d3aaa8a5e58c816214b9046512240ef437"}, + {url = "https://files.pythonhosted.org/packages/4b/3d/cc5eb6d69e0ecde80a78cc42f48579971ec333e509d56a4a6de1a2c40ba2/msgpack-1.0.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:266fa4202c0eb94d26822d9bfd7af25d1e2c088927fe8de9033d929dd5ba24c5"}, + {url = "https://files.pythonhosted.org/packages/56/50/bfcc0fad07067b6f1b09d940272ec749d5fe82570d938c2348c3ad0babf7/msgpack-1.0.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:821c7e677cc6acf0fd3f7ac664c98803827ae6de594a9f99563e48c5a2f27eb0"}, + {url = "https://files.pythonhosted.org/packages/59/67/f992ada3b42889f1b984e5651d63ea21ca3a92049cff6d75fe0a4a63e422/msgpack-1.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e42b9594cc3bf4d838d67d6ed62b9e59e201862a25e9a157019e171fbe672dd3"}, + {url = "https://files.pythonhosted.org/packages/60/bc/af94acdebc26b8d92d5673d20529438aa225698dc23338fb43c875c8968e/msgpack-1.0.5-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:f933bbda5a3ee63b8834179096923b094b76f0c7a73c1cfe8f07ad608c58844b"}, + {url = "https://files.pythonhosted.org/packages/62/57/170af6c6fccd2d950ea01e1faa58cae9643226fa8705baded11eca3aa8b5/msgpack-1.0.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c05a4a96585525916b109bb85f8cb6511db1c6f5b9d9cbcbc940dc6b4be944b"}, + {url = "https://files.pythonhosted.org/packages/62/5c/9c7fed4ca0235a2d7b8d15b4047c328976b97d2b227719e54cad1e47c244/msgpack-1.0.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef8108f8dedf204bb7b42994abf93882da1159728a2d4c5e82012edd92c9da9f"}, + {url = "https://files.pythonhosted.org/packages/67/f8/e3ab674f4a945308362e9342297fe6b35a89dd0f648aa325aabffa5dc210/msgpack-1.0.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3055b0455e45810820db1f29d900bf39466df96ddca11dfa6d074fa47054376d"}, + {url = "https://files.pythonhosted.org/packages/6b/6d/de239d77d347f1990c41b4800075a15e06f748186dd120166270dd071734/msgpack-1.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:28592e20bbb1620848256ebc105fc420436af59515793ed27d5c77a217477705"}, + {url = "https://files.pythonhosted.org/packages/6b/79/0dec8f035160464ca88b221cc79691a71cf88dc25207c17f1d918b2c7bb0/msgpack-1.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:6c4c68d87497f66f96d50142a2b73b97972130d93677ce930718f68828b382e2"}, + {url = "https://files.pythonhosted.org/packages/6c/fa/3ca00fb1e53bcacf8c186fa6aff2d2086862b12e289bcf38227d9d40bd86/msgpack-1.0.5-cp39-cp39-win_amd64.whl", hash = "sha256:06f5174b5f8ed0ed919da0e62cbd4ffde676a374aba4020034da05fab67b9164"}, + {url = "https://files.pythonhosted.org/packages/6c/fe/8a7747ca57074307a2e8f1de58441952a9dbdf9e8a8e5873d53a5ce0835c/msgpack-1.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5494ea30d517a3576749cad32fa27f7585c65f5f38309c88c6d137877fa28a5a"}, + {url = "https://files.pythonhosted.org/packages/72/ac/2eda5af7cd1450c52d031e48c76b280eac5bb2e588678876612f95be34ab/msgpack-1.0.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e57916ef1bd0fee4f21c4600e9d1da352d8816b52a599c46460e93a6e9f17086"}, + {url = "https://files.pythonhosted.org/packages/73/99/f338ce8b69e934c04e5d9187f85de1ae395882cd56e7deb48e78a1749af8/msgpack-1.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cdc793c50be3f01106245a61b739328f7dccc2c648b501e237f0699fe1395b81"}, + {url = "https://files.pythonhosted.org/packages/7b/e9/b47f9e93fc381885624c40cbbbd0480b18ae11ca588162fe724d43495372/msgpack-1.0.5-cp310-cp310-win32.whl", hash = "sha256:382b2c77589331f2cb80b67cc058c00f225e19827dbc818d700f61513ab47bea"}, + {url = "https://files.pythonhosted.org/packages/7e/1c/9d0fd241a4e88e1cd2f5babea4a27ac25b1b86dbbc05fa10741e82079a93/msgpack-1.0.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe5c63197c55bce6385d9aee16c4d0641684628f63ace85f73571e65ad1c1e8d"}, + {url = "https://files.pythonhosted.org/packages/80/f0/c1fadb4e4a38fda19e35b1b6f887d72cc9c57778af43b53f64a8cd62e922/msgpack-1.0.5-cp38-cp38-win32.whl", hash = "sha256:1c0f7c47f0087ffda62961d425e4407961a7ffd2aa004c81b9c07d9269512f6e"}, + {url = "https://files.pythonhosted.org/packages/95/c9/560c3203c4327881c9f2de26c42dacdd9567bfe7fa43458e2a680c4bdcaf/msgpack-1.0.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:1967f6129fc50a43bfe0951c35acbb729be89a55d849fab7686004da85103f1c"}, + {url = "https://files.pythonhosted.org/packages/9a/0b/ea8a49d24654f9e8604ea78b80a4d7b0cc31817d8fb6987001223ae7feaf/msgpack-1.0.5-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:36961b0568c36027c76e2ae3ca1132e35123dcec0706c4b7992683cc26c1320c"}, + {url = "https://files.pythonhosted.org/packages/9f/4a/36d936e54cf71e23ad276564465f6a54fb129e3d61520b76e13e0bb29167/msgpack-1.0.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:525228efd79bb831cf6830a732e2e80bc1b05436b086d4264814b4b2955b2fa9"}, + {url = "https://files.pythonhosted.org/packages/a2/e0/f3d5dd7809cf5728bb1bae683032ce50547d009be6551054815a8bf2a2da/msgpack-1.0.5-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:4c075728a1095efd0634a7dccb06204919a2f67d1893b6aa8e00497258bf926c"}, + {url = "https://files.pythonhosted.org/packages/ab/ff/ca74e519c47139b6c08fb21db5ead2bd2eed6cb1225f9be69390cdb48182/msgpack-1.0.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:586d0d636f9a628ddc6a17bfd45aa5b5efaf1606d2b60fa5d87b8986326e933f"}, + {url = "https://files.pythonhosted.org/packages/b8/bc/1d5fe4732dc78ff86aaf677596da08f0ae736e60ca8ab49c1f1c7366cb1a/msgpack-1.0.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:ed40e926fa2f297e8a653c954b732f125ef97bdd4c889f243182299de27e2aa9"}, + {url = "https://files.pythonhosted.org/packages/bf/68/032e62ad44f92ba6a4ae7c45054843cdec7f0c405ecdfd166f25123b0c47/msgpack-1.0.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:18334484eafc2b1aa47a6d42427da7fa8f2ab3d60b674120bce7a895a0a85bdd"}, + {url = "https://files.pythonhosted.org/packages/c1/57/01f2d8805160f559ec21d095fc7576a26fbaed2475af24ce4a135c380c14/msgpack-1.0.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9e6ca5d5699bcd89ae605c150aee83b5321f2115695e741b99618f4856c50898"}, + {url = "https://files.pythonhosted.org/packages/c2/3b/70d1eaaafb451679663a72164c46fadfb93f59c90f584dcd77289f90e4c5/msgpack-1.0.5-cp36-cp36m-win_amd64.whl", hash = "sha256:288e32b47e67f7b171f86b030e527e302c91bd3f40fd9033483f2cacc37f327a"}, + {url = "https://files.pythonhosted.org/packages/c5/c1/1b591574ba71481fbf38359a8fca5108e4ad130a6dbb9b2acb3e9277d0fe/msgpack-1.0.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:137850656634abddfb88236008339fdaba3178f4751b28f270d2ebe77a563b6c"}, + {url = "https://files.pythonhosted.org/packages/ce/b8/89cb1809b076a4651169851aa1f98128b75cbfe14034b914c9040b13c4cf/msgpack-1.0.5-cp37-cp37m-win32.whl", hash = "sha256:cb5aaa8c17760909ec6cb15e744c3ebc2ca8918e727216e79607b7bbce9c8f77"}, + {url = "https://files.pythonhosted.org/packages/d3/32/9b7a2dba9485dd7d201e4e00638fbf86e0d535a91653889c5b4dc813efdf/msgpack-1.0.5-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:366c9a7b9057e1547f4ad51d8facad8b406bab69c7d72c0eb6f529cf76d4b85f"}, + {url = "https://files.pythonhosted.org/packages/da/46/855bdcbf004fd87b6a4451e8dcd61329439dcd9039887f71ca5085769216/msgpack-1.0.5-cp38-cp38-win_amd64.whl", hash = "sha256:bae7de2026cbfe3782c8b78b0db9cbfc5455e079f1937cb0ab8d133496ac55e1"}, + {url = "https://files.pythonhosted.org/packages/dc/a1/eba11a0d4b764bc62966a565b470f8c6f38242723ba3057e9b5098678c30/msgpack-1.0.5.tar.gz", hash = "sha256:c075544284eadc5cddc70f4757331d99dcbc16b2bbd4849d15f8aae4cf36d31c"}, + {url = "https://files.pythonhosted.org/packages/e8/1f/be19c9c9cfdcc2ae8ee8c65dbe5f281cc1f3331f9b9523735f39b090b448/msgpack-1.0.5-cp37-cp37m-win_amd64.whl", hash = "sha256:ab31e908d8424d55601ad7075e471b7d0140d4d3dd3272daf39c5c19d936bd82"}, + {url = "https://files.pythonhosted.org/packages/e8/60/78906f564804aae23eb1102eca8b8830f1e08a649c179774c05fa7dc0aad/msgpack-1.0.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:17358523b85973e5f242ad74aa4712b7ee560715562554aa2134d96e7aa4cbbf"}, + {url = "https://files.pythonhosted.org/packages/e9/f1/45b73a9e97f702bcb5f51569b93990e456bc969363e55122374c22ed7d24/msgpack-1.0.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a61215eac016f391129a013c9e46f3ab308db5f5ec9f25811e811f96962599a8"}, + {url = "https://files.pythonhosted.org/packages/ef/13/c110d89d5079169354394dc226e6f84d818722939bc1fe3f9c25f982e903/msgpack-1.0.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1835c84d65f46900920b3708f5ba829fb19b1096c1800ad60bae8418652a951d"}, + {url = "https://files.pythonhosted.org/packages/f1/1f/cc3e8274934c8323f6106dae22cba8bad413166f4efb3819573de58c215c/msgpack-1.0.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:332360ff25469c346a1c5e47cbe2a725517919892eda5cfaffe6046656f0b7bb"}, + {url = "https://files.pythonhosted.org/packages/f2/da/770118f8d48e11cc9a2c7cb60d7d3c8016266526bd42c6ff5bd21013d099/msgpack-1.0.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:4f8d8b3bf1ff2672567d6b5c725a1b347fe838b912772aa8ae2bf70338d5a198"}, + {url = "https://files.pythonhosted.org/packages/f5/80/ef9c31210ac580163c0de2db4fb3179c6a3f1228c18fd366280e01d9e5d2/msgpack-1.0.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f837b93669ce4336e24d08286c38761132bc7ab29782727f8557e1eb21b2080"}, +] +"numba 0.56.4" = [ + {url = "https://files.pythonhosted.org/packages/1a/66/de416cd8364c7e5cba8da9272809676e907e7045cdcb750f6ff5fff70c29/numba-0.56.4-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:9f62672145f8669ec08762895fe85f4cf0ead08ce3164667f2b94b2f62ab23c3"}, + {url = "https://files.pythonhosted.org/packages/22/6e/880d8ae26f26a3ecce71922797cc09b3b8a4e5274adecd0793f9b59d50b8/numba-0.56.4-cp38-cp38-win_amd64.whl", hash = "sha256:91f021145a8081f881996818474ef737800bcc613ffb1e618a655725a0f9e246"}, + {url = "https://files.pythonhosted.org/packages/23/e6/78206b38a7cb823a09d5141e8d8f16701ec76d24a1bb2b91708ca890d6f3/numba-0.56.4-cp38-cp38-win32.whl", hash = "sha256:03fe94cd31e96185cce2fae005334a8cc712fc2ba7756e52dff8c9400718173f"}, + {url = "https://files.pythonhosted.org/packages/28/8d/7b7dd56751eee1745c99dd6435d06c01ba40642cf3022b1c3e88d38a9dc0/numba-0.56.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:85dbaed7a05ff96492b69a8900c5ba605551afb9b27774f7f10511095451137c"}, + {url = "https://files.pythonhosted.org/packages/32/75/9a2c33670e3c95530472c3f89f1f6617d68f47101f9e765c0892170b22df/numba-0.56.4-cp37-cp37m-win32.whl", hash = "sha256:fcdf84ba3ed8124eb7234adfbb8792f311991cbf8aed1cad4b1b1a7ee08380c1"}, + {url = "https://files.pythonhosted.org/packages/34/1a/bd24676dd4677045f9772b0f2cc9adc7a27332c0b8c82353621f86935d6a/numba-0.56.4-cp37-cp37m-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:dbcc847bac2d225265d054993a7f910fda66e73d6662fe7156452cac0325b073"}, + {url = "https://files.pythonhosted.org/packages/44/6b/b65f2f11f2bf83d49084bbf9d080139fcfcf8b27642fd76ba6eed23c1889/numba-0.56.4-cp38-cp38-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:720886b852a2d62619ae3900fe71f1852c62db4f287d0c275a60219e1643fc04"}, + {url = "https://files.pythonhosted.org/packages/58/a4/859605be01d9979fecde5e94ed6662d9a85853f9849f396d9a84455f4846/numba-0.56.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4e08e203b163ace08bad500b0c16f6092b1eb34fd1fce4feaf31a67a3a5ecf3b"}, + {url = "https://files.pythonhosted.org/packages/60/14/5dbefc1cf3b6a4c36968e7391c341b32226c5d00757efd61fe5f3d96a32e/numba-0.56.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0240f9026b015e336069329839208ebd70ec34ae5bfbf402e4fcc8e06197528e"}, + {url = "https://files.pythonhosted.org/packages/6b/b5/b0a0af320c43f2925c699e8613382d3669829b585717ef2d795a06187564/numba-0.56.4-cp37-cp37m-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8a95ca9cc77ea4571081f6594e08bd272b66060634b8324e99cd1843020364f9"}, + {url = "https://files.pythonhosted.org/packages/7c/00/ff9a3f34c7862afe80b7ee5f1b78ff21271cf48d42a337d9669316b81b8c/numba-0.56.4-cp39-cp39-win32.whl", hash = "sha256:14dbbabf6ffcd96ee2ac827389afa59a70ffa9f089576500434c34abf9b054a4"}, + {url = "https://files.pythonhosted.org/packages/95/39/41a11c34d56944f1bf49759f7e15d19d80508c0239ed9cd246a012374334/numba-0.56.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c602d015478b7958408d788ba00a50272649c5186ea8baa6cf71d4a1c761bba1"}, + {url = 
"https://files.pythonhosted.org/packages/9e/e8/d439b8eab78745573c3d4822954d081b793f2a7e17478d3d2c93a3709e2d/numba-0.56.4-cp39-cp39-win_amd64.whl", hash = "sha256:0da583c532cd72feefd8e551435747e0e0fbb3c0530357e6845fcc11e38d6aea"}, + {url = "https://files.pythonhosted.org/packages/a2/70/28f2c417a3660784b0df35edea9ea9201663606c7cdcd3f81b32dae11321/numba-0.56.4-cp310-cp310-win32.whl", hash = "sha256:0611e6d3eebe4cb903f1a836ffdb2bda8d18482bcd0a0dcc56e79e2aa3fefef5"}, + {url = "https://files.pythonhosted.org/packages/a9/28/2babef91a7c2f84718d8c47ecd89216913cf9e130d302208c3cfd0d17122/numba-0.56.4-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:3cb1a07a082a61df80a468f232e452d818f5ae254b40c26390054e4e868556e0"}, + {url = "https://files.pythonhosted.org/packages/aa/eb/2781f65d4523d7fbbbf85f0dd63b4e6dadcc441523065801c39f7908cf27/numba-0.56.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a12ef323c0f2101529d455cfde7f4135eaa147bad17afe10b48634f796d96abd"}, + {url = "https://files.pythonhosted.org/packages/ac/ae/119514059a9ff6b95cda9e6d0a3540b987a939fa23077874fc0dd6f7ae45/numba-0.56.4-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:553da2ce74e8862e18a72a209ed3b6d2924403bdd0fb341fa891c6455545ba7c"}, + {url = "https://files.pythonhosted.org/packages/ad/5a/b79eda6012461c4dea6c5c4122c7438843d617f960d2d635f044a97ab912/numba-0.56.4-cp37-cp37m-win_amd64.whl", hash = "sha256:42f9e1be942b215df7e6cc9948cf9c15bb8170acc8286c063a9e57994ef82fd1"}, + {url = "https://files.pythonhosted.org/packages/ce/44/ec8efc38c4a64d84dbc508ab56d3c9e2faeb6ddfb9aa9afc1a3754d46ea1/numba-0.56.4-cp39-cp39-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:03634579d10a6129181129de293dd6b5eaabee86881369d24d63f8fe352dd6cb"}, + {url = "https://files.pythonhosted.org/packages/d6/ca/f14d880e74a8f4581cc474e30c6c25b8d7febf3e95be7c5156c9c60daa39/numba-0.56.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c75e8a5f810ce80a0cfad6e74ee94f9fde9b40c81312949bf356b7304ef20740"}, + {url = "https://files.pythonhosted.org/packages/db/e4/8ab9e0cde9efe95a1c1f9ecbf91ac897aae36e1ca5667a5c54e089d2bfb2/numba-0.56.4-cp37-cp37m-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d69ad934e13c15684e7887100a8f5f0f61d7a8e57e0fd29d9993210089a5b531"}, + {url = "https://files.pythonhosted.org/packages/dc/b0/b722cde279d5c879c1bb7a307337b16a25068818a4ff5ee2e01fd2c605b4/numba-0.56.4-cp310-cp310-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:f4cfc3a19d1e26448032049c79fc60331b104f694cf570a9e94f4e2c9d0932bb"}, + {url = "https://files.pythonhosted.org/packages/de/e8/d948883e14f822a82d8113c3a074fadd5cf4cc920fcb8444ec57a5960f59/numba-0.56.4-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:d0ae9270a7a5cc0ede63cd234b4ff1ce166c7a749b91dbbf45e0000c56d3eade"}, + {url = "https://files.pythonhosted.org/packages/df/06/b363a48cf4893ea64de772c0de9d5200bed98f2c2d16dff886848de15f2a/numba-0.56.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4373da9757049db7c90591e9ec55a2e97b2b36ba7ae3bf9c956a513374077470"}, + {url = "https://files.pythonhosted.org/packages/e2/1e/de917b683bb5f0b6078fb1397293eab84c4eaa825fbf94d73d6488eb354f/numba-0.56.4.tar.gz", hash = "sha256:32d9fef412c81483d7efe0ceb6cf4d3310fde8b624a9cecca00f790573ac96ee"}, + {url = "https://files.pythonhosted.org/packages/e5/68/f05524d613227589ecd666aaf6b27e39c9870cc1c50eb42b2ef2b4f8a65d/numba-0.56.4-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e64d338b504c9394a4a34942df4627e1e6cb07396ee3b49fe7b8d6420aa5104f"}, + {url = 
"https://files.pythonhosted.org/packages/f9/05/0a6b011d041fb7120e48c8e41b7670e1e99d04bca578f58ba10d278929f3/numba-0.56.4-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3a993349b90569518739009d8f4b523dfedd7e0049e6838c0e17435c3e70dcc4"}, + {url = "https://files.pythonhosted.org/packages/fa/a1/e9dad5793d45f08401aa6983a93d53423ce4a403ae333937fa2448b263b2/numba-0.56.4-cp310-cp310-win_amd64.whl", hash = "sha256:fbfb45e7b297749029cb28694abf437a78695a100e7c2033983d69f0ba2698d4"}, +] +"numpy 1.23.5" = [ + {url = "https://files.pythonhosted.org/packages/08/36/6589c7d5fc4fecda63de4453fefff7c58f6de2b1bb7dfbe7fa807bf85c46/numpy-1.23.5-cp39-cp39-win_amd64.whl", hash = "sha256:09b7847f7e83ca37c6e627682f145856de331049013853f344f37b0c9690e3df"}, + {url = "https://files.pythonhosted.org/packages/0f/3d/25e99f2191cce5029310c41cf9a34b5107d4475477bbce2f6d2e68c1c93b/numpy-1.23.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9a909a8bae284d46bbfdefbdd4a262ba19d3bc9921b1e76126b1d21c3c34135"}, + {url = "https://files.pythonhosted.org/packages/0f/ae/dad4b8e7c65494cbbd1c063de114efaf9acd0f5f6171f044f0d4b6299787/numpy-1.23.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9c88793f78fca17da0145455f0d7826bcb9f37da4764af27ac945488116efe63"}, + {url = "https://files.pythonhosted.org/packages/19/0d/b8c34e4baf258d77a8592bdce45183e9a12874c167f5966c7dd467b74ea9/numpy-1.23.5-cp311-cp311-win_amd64.whl", hash = "sha256:0cbe9848fad08baf71de1a39e12d1b6310f1d5b2d0ea4de051058e6e1076852d"}, + {url = "https://files.pythonhosted.org/packages/25/7b/3b587a62aa54ad7ecf90eabfc77cf78e96d3df1d0e8c31fc534ad3ca6e17/numpy-1.23.5-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:abdde9f795cf292fb9651ed48185503a2ff29be87770c3b8e2a14b0cd7aa16f8"}, + {url = "https://files.pythonhosted.org/packages/2b/1a/9ac00116d3a64b5ea031fdb2ff071062a6e2140553fa0770b5f007b84252/numpy-1.23.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5039f55555e1eab31124a5768898c9e22c25a65c1e0037f4d7c495a45778c9f2"}, + {url = "https://files.pythonhosted.org/packages/3f/ce/04d7772671d8d3a14e426d7560047821c4e2d29ee2b5cfa252601412083b/numpy-1.23.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:01dd17cbb340bf0fc23981e52e1d18a9d4050792e8fb8363cecbf066a84b827d"}, + {url = "https://files.pythonhosted.org/packages/42/38/775b43da55fa7473015eddc9a819571517d9a271a9f8134f68fb9be2f212/numpy-1.23.5.tar.gz", hash = "sha256:1b1766d6f397c18153d40015ddfc79ddb715cabadc04d2d228d4e5a8bc4ded1a"}, + {url = "https://files.pythonhosted.org/packages/4c/42/6274f92514fbefcb1caa66d56d82ac7ac89f7652c0cef1e159a4b79e09f1/numpy-1.23.5-cp38-cp38-win_amd64.whl", hash = "sha256:ca51fcfcc5f9354c45f400059e88bc09215fb71a48d3768fb80e357f3b457e1e"}, + {url = "https://files.pythonhosted.org/packages/4c/b9/038abd6fbd67b05b03cb1af590cfc02b7f1e5a37af7ac6a868f5093c29f5/numpy-1.23.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33161613d2269025873025b33e879825ec7b1d831317e68f4f2f0f84ed14c719"}, + {url = "https://files.pythonhosted.org/packages/4d/39/d33202cc56c21123a50c6d5e160d00c18ff685ab864dbd4bf80dd40a7af9/numpy-1.23.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e9f4c4e51567b616be64e05d517c79a8a22f3606499941d97bb76f2ca59f982d"}, + {url = "https://files.pythonhosted.org/packages/5d/a1/cdac656aed8bc04dc86296490f8dbef68474c3294cc31af30f2bd0ec06de/numpy-1.23.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:bf837dc63ba5c06dc8797c398db1e223a466c7ece27a1f7b5232ba3466aafe3d"}, + {url = "https://files.pythonhosted.org/packages/63/d4/3f0d610a2006434f2b7b2e0c80291368d59b0a03bb3e1911fdb9476232d4/numpy-1.23.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0aaee12d8883552fadfc41e96b4c82ee7d794949e2a7c3b3a7201e968c7ecab9"}, + {url = "https://files.pythonhosted.org/packages/67/6b/d7c93d458d16464da9b3f560a20c363a19e242ebbb019bd1e1d797523851/numpy-1.23.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7903ba8ab592b82014713c491f6c5d3a1cde5b4a3bf116404e08f5b52f6daf43"}, + {url = "https://files.pythonhosted.org/packages/6a/03/ae6c3c307f9c5c7516de3df3e764ebb1de33e54e197f0370992138433ef4/numpy-1.23.5-cp310-cp310-win_amd64.whl", hash = "sha256:dbee87b469018961d1ad79b1a5d50c0ae850000b639bcb1b694e9981083243b6"}, + {url = "https://files.pythonhosted.org/packages/6e/7f/94797cfe0263a30805f3074e535adfde02b885ac43d1e4dac85f82213b0b/numpy-1.23.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ce571367b6dfe60af04e04a1834ca2dc5f46004ac1cc756fb95319f64c095a96"}, + {url = "https://files.pythonhosted.org/packages/8c/7a/171d3b4a54de835c8f95181dd2885607c0e04adca55ef99d9de559b4c9ba/numpy-1.23.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8969bfd28e85c81f3f94eb4a66bc2cf1dbdc5c18efc320af34bffc54d6b1e38f"}, + {url = "https://files.pythonhosted.org/packages/9b/55/a2669debe264b1f22a8133734595128e40b96a8066e17e53e8d160168e41/numpy-1.23.5-cp311-cp311-win32.whl", hash = "sha256:b2a9ab7c279c91974f756c84c365a669a887efa287365a8e2c418f8b3ba73fb0"}, + {url = "https://files.pythonhosted.org/packages/9e/9d/ff17c357f7144301da85f8c03d56593cfd2904e9ce89f86c8eefaa96d2d5/numpy-1.23.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a7ac231a08bb37f852849bbb387a20a57574a97cfc7b6cabb488a4fc8be176de"}, + {url = "https://files.pythonhosted.org/packages/af/92/8efba008b9bda66456a1844a0e133dc76c08c5fb68c67a674f046211db29/numpy-1.23.5-cp310-cp310-win32.whl", hash = "sha256:522e26bbf6377e4d76403826ed689c295b0b238f46c28a7251ab94716da0b280"}, + {url = "https://files.pythonhosted.org/packages/b8/d0/e6a2cb9a3f3e863a43e50949e9ae704be70baf398fd5af59355f65c8740a/numpy-1.23.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56e454c7833e94ec9769fa0f86e6ff8e42ee38ce0ce1fa4cbb747ea7e06d56aa"}, + {url = "https://files.pythonhosted.org/packages/b9/0e/10ab011eaebeed29d28ad710d0a3ab2654c06a2800e178e8f2f3a5947ad4/numpy-1.23.5-cp38-cp38-win32.whl", hash = "sha256:06005a2ef6014e9956c09ba07654f9837d9e26696a0470e42beedadb78c11b07"}, + {url = "https://files.pythonhosted.org/packages/bf/d1/1017fe3f5d65c4fe054a793f18f940d913868bb2846a02d3f6244a829a30/numpy-1.23.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92c8c1e89a1f5028a4c6d9e3ccbe311b6ba53694811269b992c0b224269e2398"}, + {url = "https://files.pythonhosted.org/packages/c6/4f/63f6f16d3f44a764a3b66c6233e133baf912e198a93e14c39ee991f587d0/numpy-1.23.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d208a0f8729f3fb790ed18a003f3a57895b989b40ea4dce4717e9cf4af62c6bb"}, + {url = "https://files.pythonhosted.org/packages/d2/55/b9b4bfb9d1d828d7d3192c4059e7b4a7d755ba2e1618089af4be77c152d1/numpy-1.23.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f063b69b090c9d918f9df0a12116029e274daf0181df392839661c4c7ec9018a"}, + {url = "https://files.pythonhosted.org/packages/d5/95/f311e6fdaabe24f909eeb6d5482e3adef27fa8389cb8a84823ae560bf480/numpy-1.23.5-cp39-cp39-win32.whl", hash = 
"sha256:af1da88f6bc3d2338ebbf0e22fe487821ea4d8e89053e25fa59d1d79786e7481"}, + {url = "https://files.pythonhosted.org/packages/e4/f3/679b3a042a127de0d7c84874913c3e23bb84646eb3bc6ecab3f8c872edc9/numpy-1.23.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e05b1c973a9f858c74367553e236f287e749465f773328c8ef31abe18f691e1"}, + {url = "https://files.pythonhosted.org/packages/e8/ad/b935c7421657a032fd2a5332eed098f3b9993a155afceb1daa280ff6611f/numpy-1.23.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58f545efd1108e647604a1b5aa809591ccd2540f468a880bedb97247e72db387"}, +] +"nvidia-cublas-cu11 11.10.3.66" = [ + {url = "https://files.pythonhosted.org/packages/7a/08/57e6b6481af73590259a9600c32a68eb853966e354fca147cde17ed9ea27/nvidia_cublas_cu11-11.10.3.66-py3-none-win_amd64.whl", hash = "sha256:8ac17ba6ade3ed56ab898a036f9ae0756f1e81052a317bf98f8c6d18dc3ae49e"}, + {url = "https://files.pythonhosted.org/packages/ce/41/fdeb62b5437996e841d83d7d2714ca75b886547ee8017ee2fe6ea409d983/nvidia_cublas_cu11-11.10.3.66-py3-none-manylinux1_x86_64.whl", hash = "sha256:d32e4d75f94ddfb93ea0a5dda08389bcc65d8916a25cb9f37ac89edaeed3bded"}, +] +"nvidia-cuda-nvrtc-cu11 11.7.99" = [ + {url = "https://files.pythonhosted.org/packages/9e/10/c9fc448f33d439981d6a74b693526871c4ef13e8d81a7b4de12e3a12a1b9/nvidia_cuda_nvrtc_cu11-11.7.99-py3-none-win_amd64.whl", hash = "sha256:f2effeb1309bdd1b3854fc9b17eaf997808f8b25968ce0c7070945c4265d64a3"}, + {url = "https://files.pythonhosted.org/packages/ea/8d/0709ba16c2831c17ec1c2ea1eeb89ada11ffa8d966d773cce0a7463b22bb/nvidia_cuda_nvrtc_cu11-11.7.99-py3-none-manylinux1_x86_64.whl", hash = "sha256:f7d9610d9b7c331fa0da2d1b2858a4a8315e6d49765091d28711c8946e7425e7"}, + {url = "https://files.pythonhosted.org/packages/ef/25/922c5996aada6611b79b53985af7999fc629aee1d5d001b6a22431e18fec/nvidia_cuda_nvrtc_cu11-11.7.99-2-py3-none-manylinux1_x86_64.whl", hash = "sha256:9f1562822ea264b7e34ed5930567e89242d266448e936b85bc97a3370feabb03"}, +] +"nvidia-cuda-runtime-cu11 11.7.99" = [ + {url = "https://files.pythonhosted.org/packages/32/2c/d89ea2b4051fbabff8d2edda8c735dabae6d5d1b8d5215f9749d38dcdb72/nvidia_cuda_runtime_cu11-11.7.99-py3-none-win_amd64.whl", hash = "sha256:bc77fa59a7679310df9d5c70ab13c4e34c64ae2124dd1efd7e5474b71be125c7"}, + {url = "https://files.pythonhosted.org/packages/36/92/89cf558b514125d2ebd8344dd2f0533404b416486ff681d5434a5832a019/nvidia_cuda_runtime_cu11-11.7.99-py3-none-manylinux1_x86_64.whl", hash = "sha256:cc768314ae58d2641f07eac350f40f99dcb35719c4faff4bc458a7cd2b119e31"}, +] +"nvidia-cudnn-cu11 8.5.0.96" = [ + {url = "https://files.pythonhosted.org/packages/db/69/4d28d4706946f89fffe3f87373a079ae95dc17f9c0fcd840fe570c67e36b/nvidia_cudnn_cu11-8.5.0.96-py3-none-manylinux1_x86_64.whl", hash = "sha256:71f8111eb830879ff2836db3cccf03bbd735df9b0d17cd93761732ac50a8a108"}, + {url = "https://files.pythonhosted.org/packages/dc/30/66d4347d6e864334da5bb1c7571305e501dcb11b9155971421bb7bb5315f/nvidia_cudnn_cu11-8.5.0.96-2-py3-none-manylinux1_x86_64.whl", hash = "sha256:402f40adfc6f418f9dae9ab402e773cfed9beae52333f6d86ae3107a1b9527e7"}, +] +"packaging 23.0" = [ + {url = "https://files.pythonhosted.org/packages/47/d5/aca8ff6f49aa5565df1c826e7bf5e85a6df852ee063600c1efa5b932968c/packaging-23.0.tar.gz", hash = "sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97"}, + {url = "https://files.pythonhosted.org/packages/ed/35/a31aed2993e398f6b09a790a181a7927eb14610ee8bbf02dc14d31677f1c/packaging-23.0-py3-none-any.whl", hash = 
"sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2"}, +] +"pandas 1.5.3" = [ + {url = "https://files.pythonhosted.org/packages/02/4a/8e2513db9d15929b833147f975d8424dc6a3e18100ead10aab78756a1aad/pandas-1.5.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c4c00e0b0597c8e4f59e8d461f797e5d70b4d025880516a8261b2817c47759ee"}, + {url = "https://files.pythonhosted.org/packages/0e/1d/f964977eea9ed72d5f1c53af56038aca2ce781a0cc8bce8aeb33da039ca1/pandas-1.5.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:26d9c71772c7afb9d5046e6e9cf42d83dd147b5cf5bcb9d97252077118543792"}, + {url = "https://files.pythonhosted.org/packages/26/c1/469f5d7863a9901d92b795d9fc5c7c4acccd7df62b13367c7fac0d499c3b/pandas-1.5.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:14e45300521902689a81f3f41386dc86f19b8ba8dd5ac5a3c7010ef8d2932813"}, + {url = "https://files.pythonhosted.org/packages/27/c7/35b81ce5f680f2dac55eac14d103245cd8cf656ae4a2ff3be2e69fd1d330/pandas-1.5.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3ac844a0fe00bfaeb2c9b51ab1424e5c8744f89860b138434a363b1f620f354"}, + {url = "https://files.pythonhosted.org/packages/2b/63/fa344006a41dd696720328af0f1f914f530e9eca2f794607f6af9158897d/pandas-1.5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9842b6f4b8479e41968eced654487258ed81df7d1c9b7b870ceea24ed9459b31"}, + {url = "https://files.pythonhosted.org/packages/49/e2/79e46612dc25ebc7603dc11c560baa7266c90f9e48537ecf1a02a0dd6bff/pandas-1.5.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a0a56cef15fd1586726dace5616db75ebcfec9179a3a55e78f72c5639fa2a23"}, + {url = "https://files.pythonhosted.org/packages/53/c9/d2f910dace7ef849b626980d0fd033b9cded36568949c8d560c9630ad2e0/pandas-1.5.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c39a8da13cede5adcd3be1182883aea1c925476f4e84b2807a46e2775306305d"}, + {url = "https://files.pythonhosted.org/packages/54/a0/c62d63c5c69be9aae07836e4d7e25e7a6f5590be3d8f2d53f43eeec5c475/pandas-1.5.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:565fa34a5434d38e9d250af3c12ff931abaf88050551d9fbcdfafca50d62babf"}, + {url = "https://files.pythonhosted.org/packages/56/73/3351beeb807dca69fcc3c4966bcccc51552bd01549a9b13c04ab00a43f21/pandas-1.5.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f2b952406a1588ad4cad5b3f55f520e82e902388a6d5a4a91baa8d38d23c7f6"}, + {url = "https://files.pythonhosted.org/packages/5f/34/b7858bb7d6d6bf4d9df1dde777a11fcf3ff370e1d1b3956e3d0fcca8322c/pandas-1.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:972d8a45395f2a2d26733eb8d0f629b2f90bebe8e8eddbb8829b180c09639572"}, + {url = "https://files.pythonhosted.org/packages/63/8d/c2bd356b9d4baf1c5cf8d7e251fb4540e87083072c905430da48c2bb31eb/pandas-1.5.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e474390e60ed609cec869b0da796ad94f420bb057d86784191eefc62b65819ae"}, + {url = "https://files.pythonhosted.org/packages/74/ee/146cab1ff6d575b54ace8a6a5994048380dc94879b0125b25e62edcb9e52/pandas-1.5.3.tar.gz", hash = "sha256:74a3fd7e5a7ec052f183273dc7b0acd3a863edf7520f5d3a1765c04ffdb3b0b1"}, + {url = "https://files.pythonhosted.org/packages/7d/d6/92be61dca3880c7cec99a9b4acf6260b3dc00519673fdb3e6666ac6096ce/pandas-1.5.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd05f7783b3274aa206a1af06f0ceed3f9b412cf665b7247eacd83be41cf7bf0"}, + {url = 
"https://files.pythonhosted.org/packages/90/19/1a92d73cda1233326e787a4c14362a1fcce4c7d9f28316fd769308aefb99/pandas-1.5.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c74a62747864ed568f5a82a49a23a8d7fe171d0c69038b38cedf0976831296fa"}, + {url = "https://files.pythonhosted.org/packages/94/85/89f6547642b28fbd874504a6f548d6be4d88981837a23ab18d76cb773bea/pandas-1.5.3-cp39-cp39-win32.whl", hash = "sha256:7cec0bee9f294e5de5bbfc14d0573f65526071029d036b753ee6507d2a21480a"}, + {url = "https://files.pythonhosted.org/packages/a7/2b/c71df8794e8e75ba1ec9da1c1a2efc946590aa79a05148a4138405ef5f72/pandas-1.5.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a50d9a4336a9621cab7b8eb3fb11adb82de58f9b91d84c2cd526576b881a0c5a"}, + {url = "https://files.pythonhosted.org/packages/a9/cd/34f6b0780301be81be804d7aa71d571457369e6131e2b330af2b0fed1aad/pandas-1.5.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3749077d86e3a2f0ed51367f30bf5b82e131cc0f14260c4d3e499186fccc4406"}, + {url = "https://files.pythonhosted.org/packages/b0/be/1843b9aff84b98899663e7cad9f45513dfdd11d69cb5bd85c648aaf6a8d4/pandas-1.5.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f76d097d12c82a535fda9dfe5e8dd4127952b45fea9b0276cb30cca5ea313fbc"}, + {url = "https://files.pythonhosted.org/packages/b2/87/e0a0e9a0ab9ede47192aa40887b7e31d048c98326a41d6b57c658d1a809d/pandas-1.5.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fbcb19d6fceb9e946b3e23258757c7b225ba450990d9ed63ccceeb8cae609f7"}, + {url = "https://files.pythonhosted.org/packages/b8/6c/005bd604994f7cbede4d7bf030614ef49a2213f76bc3d738ecf5b0dcc810/pandas-1.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:50869a35cbb0f2e0cd5ec04b191e7b12ed688874bd05dd777c19b28cbea90996"}, + {url = "https://files.pythonhosted.org/packages/bc/bb/359b304fb2d9a97c7344b6ceb585dc22fff864e4f3f1d1511166cd84865e/pandas-1.5.3-cp38-cp38-win32.whl", hash = "sha256:87bd9c03da1ac870a6d2c8902a0e1fd4267ca00f13bc494c9e5a9020920e1d51"}, + {url = "https://files.pythonhosted.org/packages/c2/45/801ecd8434eef0b39cc02795ffae273fe3df3cfcb3f6fff215efbe92d93c/pandas-1.5.3-cp39-cp39-win_amd64.whl", hash = "sha256:dfd681c5dc216037e0b0a2c821f5ed99ba9f03ebcf119c7dac0e9a7b960b9ec9"}, + {url = "https://files.pythonhosted.org/packages/ca/4e/d18db7d5ff9d28264cd2a7e2499b8701108f0e6c698e382cfd5d20685c21/pandas-1.5.3-cp38-cp38-win_amd64.whl", hash = "sha256:41179ce559943d83a9b4bbacb736b04c928b095b5f25dd2b7389eda08f46f373"}, + {url = "https://files.pythonhosted.org/packages/d9/cd/f27c2992cbe05a3e39937f73a4be635a9ec149ec3ca4467d8cf039718994/pandas-1.5.3-cp310-cp310-win_amd64.whl", hash = "sha256:478ff646ca42b20376e4ed3fa2e8d7341e8a63105586efe54fa2508ee087f328"}, + {url = "https://files.pythonhosted.org/packages/da/6d/1235da14daddaa6e47f74ba0c255358f0ce7a6ee05da8bf8eb49161aa6b5/pandas-1.5.3-cp311-cp311-win_amd64.whl", hash = "sha256:bc4c368f42b551bf72fac35c5128963a171b40dce866fb066540eeaf46faa003"}, + {url = "https://files.pythonhosted.org/packages/e1/4d/3eb96e53a9208350ee21615f850c4be9a246d32bf1d34cd36682cb58c3b7/pandas-1.5.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f69c4029613de47816b1bb30ff5ac778686688751a5e9c99ad8c7031f6508e5"}, + {url = "https://files.pythonhosted.org/packages/e2/24/a26af514113fd5eca2d8fe41ba4f22f70dfe6afefde4a6beb6a203570935/pandas-1.5.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6973549c01ca91ec96199e940495219c887ea815b2083722821f1d7abfa2b4dc"}, +] +"pillow 9.4.0" = [ + {url = 
"https://files.pythonhosted.org/packages/06/50/fd98b6be293b96b02ca0dca15939e8e8d0c7f71d731e9b93e6403487911f/Pillow-9.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:94cdff45173b1919350601f82d61365e792895e3c3a3443cf99819e6fbf717a5"}, + {url = "https://files.pythonhosted.org/packages/09/f3/213bc3f14041002f871837a3130a66cda3b4a2b22b0be9da6fc7a7346a0d/Pillow-9.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd752c5ff1b4a870b7661234694f24b1d2b9076b8bf337321a814c612665f343"}, + {url = "https://files.pythonhosted.org/packages/0a/11/78b9759bb35007e9c769044da6e742cdcfcfdfa2e22ada027520cc0c9c0f/Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db74f5562c09953b2c5f8ec4b7dfd3f5421f31811e97d1dbc0a7c93d6e3a24df"}, + {url = "https://files.pythonhosted.org/packages/0b/ca/c29e319e7892e324e339e3e376c3b4db75d75f0b96620abde0206d2738b3/Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aabdab8ec1e7ca7f1434d042bf8b1e92056245fb179790dc97ed040361f16bfd"}, + {url = "https://files.pythonhosted.org/packages/10/56/cbaf507124e237a60ee32adc271da2d4976ce92a25d3ffca47af1e252b80/Pillow-9.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0dd4c681b82214b36273c18ca7ee87065a50e013112eea7d78c7a1b89a739153"}, + {url = "https://files.pythonhosted.org/packages/17/c0/5b3b961d414512e457bfd6337b085830a2609f8f51c05f1ac685050c76a6/Pillow-9.4.0-cp311-cp311-win32.whl", hash = "sha256:b222090c455d6d1a64e6b7bb5f4035c4dff479e22455c9eaa1bdd4c75b52c80c"}, + {url = "https://files.pythonhosted.org/packages/18/c5/fbbcab5cc53c4278c1843d985c6e8e80c79f993c6c1e07f587f34afc76ee/Pillow-9.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:46c259e87199041583658457372a183636ae8cd56dbf3f0755e0f376a7f9d0e6"}, + {url = "https://files.pythonhosted.org/packages/18/ce/2390e0a84138fb84e7510bbc5a7a8530c2ac5661241531e60b0f85c6f35b/Pillow-9.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a3049a10261d7f2b6514d35bbb7a4dfc3ece4c4de14ef5876c4b7a23a0e566d"}, + {url = "https://files.pythonhosted.org/packages/20/46/8f6f569584425c5250cd26c79ab2f56df42e388e6a737ae8eafa939ac607/Pillow-9.4.0-1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:fb5c1ad6bad98c57482236a21bf985ab0ef42bd51f7ad4e4538e89a997624e12"}, + {url = "https://files.pythonhosted.org/packages/20/98/2bd3aa232e4c4b2db3e9b65876544b23caabbb0db43929253bfb72e520ca/Pillow-9.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:2968c58feca624bb6c8502f9564dd187d0e1389964898f5e9e1fbc8533169157"}, + {url = "https://files.pythonhosted.org/packages/23/59/686cc564bd861e87e7bc4c0fd6a88c4df1f698e3f041bbfeb52ac169633d/Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:575d8912dca808edd9acd6f7795199332696d3469665ef26163cd090fa1f8bfa"}, + {url = "https://files.pythonhosted.org/packages/23/8f/4d428380740a7b83a51a4b25c33d422c59dcece99784f09acf7f0b3e4ee4/Pillow-9.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:09b89ddc95c248ee788328528e6a2996e09eaccddeeb82a5356e92645733be35"}, + {url = "https://files.pythonhosted.org/packages/26/0b/ca34a0b44b7a5ab85e9a71442870f362ebba004a2b350889d2ec12df6bcb/Pillow-9.4.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d197df5489004db87d90b918033edbeee0bd6df3848a204bca3ff0a903bef837"}, + {url = "https://files.pythonhosted.org/packages/2e/48/b8fef18f09668ab53af6c70b7e1465446335e2598a4d2984b20211f0550d/Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:d3b56206244dc8711f7e8b7d6cad4663917cd5b2d950799425076681e8766286"}, + {url = "https://files.pythonhosted.org/packages/30/ed/ea026ae1405954e06523c533802f5bc5f622b7e7bac5c9da7d9f3488945f/Pillow-9.4.0-cp37-cp37m-win32.whl", hash = "sha256:7ac7594397698f77bce84382929747130765f66406dc2cd8b4ab4da68ade4c6e"}, + {url = "https://files.pythonhosted.org/packages/31/3f/ea3e2b408ca22604c41e5f54fbe72d9aab3815d49c0212d39447e503799d/Pillow-9.4.0-2-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:8a2b5874d17e72dfb80d917213abd55d7e1ed2479f38f001f264f7ce7bae757c"}, + {url = "https://files.pythonhosted.org/packages/36/31/9fae23878d894adae29aced659d41a78325669dd23018b26ab355828e870/Pillow-9.4.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:46f39cab8bbf4a384ba7cb0bc8bae7b7062b6a11cfac1ca4bc144dea90d4a9f5"}, + {url = "https://files.pythonhosted.org/packages/40/d1/b646804eb150a94c76abc54576ea885f71030bab6c541ccb9594db5da64a/Pillow-9.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:ed3e4b4e1e6de75fdc16d3259098de7c6571b1a6cc863b1a49e7d3d53e036070"}, + {url = "https://files.pythonhosted.org/packages/43/95/c81019bc15b14fd58862c50af0985429edc7e1dee204cbfc8f64df3f2445/Pillow-9.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:6663977496d616b618b6cfa43ec86e479ee62b942e1da76a2c3daa1c75933ef4"}, + {url = "https://files.pythonhosted.org/packages/45/f6/5881348d74284de2d32141d308456fcc1341b8c449e28d4ffc9a287f8dcb/Pillow-9.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8f127e7b028900421cad64f51f75c051b628db17fb00e099eb148761eed598c9"}, + {url = "https://files.pythonhosted.org/packages/48/e1/910c42ebc15a2ffdaa2e1e6589467b7e5f6f5acdcef8827c375320dbfa88/Pillow-9.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d7081c084ceb58278dd3cf81f836bc818978c0ccc770cbbb202125ddabec6628"}, + {url = "https://files.pythonhosted.org/packages/4d/2d/12eae829bcf4ee211014ed71c6430c8b0d3fc462597dd695867c03d59fcb/Pillow-9.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:ba6612b6548220ff5e9df85261bddc811a057b0b465a1226b39bfb8550616aee"}, + {url = "https://files.pythonhosted.org/packages/4e/9a/3e631adbaf3e539677ecdd8aa7824dcc08347237d5f5dc6d8afc14f62d30/Pillow-9.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0f3269304c1a7ce82f1759c12ce731ef9b6e95b6df829dccd9fe42912cc48569"}, + {url = "https://files.pythonhosted.org/packages/51/57/c12f96c26a7d981fe50b802bacd1faf1dd2f04912397c7abf946a0265883/Pillow-9.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4dfdae195335abb4e89cc9762b2edc524f3c6e80d647a9a81bf81e17e3fb6f0"}, + {url = "https://files.pythonhosted.org/packages/52/75/141b332164bfcd78d3d49b95a36a34b0190f3030d93f686cb596156d368d/Pillow-9.4.0-cp310-cp310-win32.whl", hash = "sha256:f09598b416ba39a8f489c124447b007fe865f786a89dbfa48bb5cf395693132a"}, + {url = "https://files.pythonhosted.org/packages/53/9c/198822d4f9d7a50f17f1e04c5b1e9bf3f0ed8638e76e367490bce79544eb/Pillow-9.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0845adc64fe9886db00f5ab68c4a8cd933ab749a87747555cec1c95acea64b0b"}, + {url = "https://files.pythonhosted.org/packages/54/4f/346b8ea1b772cb6e802ed32a78b18627be6a9d9a29755fa82ea436bb582e/Pillow-9.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:60e7da3a3ad1812c128750fc1bc14a7ceeb8d29f77e0a2356a8fb2aa8925287d"}, + {url = "https://files.pythonhosted.org/packages/5c/2a/72b80cd8a35fac89142afb35aabab6ce2631a3261043b6216664c9137b29/Pillow-9.4.0-1-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = 
"sha256:43521ce2c4b865d385e78579a082b6ad1166ebed2b1a2293c3be1d68dd7ca3b9"}, + {url = "https://files.pythonhosted.org/packages/5e/1c/3afb5e7cfde05e7bf321b473fd24fa1b0c09a15742a0ec1b25bab57970fc/Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3c4ed2ff6760e98d262e0cc9c9a7f7b8a9f61aa4d47c58835cdaf7b0b8811bb"}, + {url = "https://files.pythonhosted.org/packages/5e/7c/293136a5171800001be33c21a51daaca68fae954b543e2c015a6bb81a716/Pillow-9.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:f6e78171be3fb7941f9910ea15b4b14ec27725865a73c15277bc39f5ca4f8391"}, + {url = "https://files.pythonhosted.org/packages/5e/bd/d009056616d6ca130d17116e3b2745416dd1421f748b94106571a7aa2f19/Pillow-9.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7cfc287da09f9d2a7ec146ee4d72d6ea1342e770d975e49a8621bf54eaa8f30f"}, + {url = "https://files.pythonhosted.org/packages/69/6d/17f0ee189732bd16def91c0b440203c829b71e3af24f569cb22d831760cb/Pillow-9.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16a8df99701f9095bea8a6c4b3197da105df6f74e6176c5b410bc2df2fd29a57"}, + {url = "https://files.pythonhosted.org/packages/6a/cc/5b915fd1d4fe9edfd2fb23779079c11fee21535227aabc141f5fae4c97ab/Pillow-9.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5b2f8a31bd43e0f18172d8ac82347c8f37ef3e0b414431157718aa234991b28"}, + {url = "https://files.pythonhosted.org/packages/6e/2f/937e89f838161c09bd17e53b49b8415051473c9ce9b6c55b288a66625b13/Pillow-9.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c5c1362c14aee73f50143d74389b2c158707b4abce2cb055b7ad37ce60738d47"}, + {url = "https://files.pythonhosted.org/packages/73/58/82e581350caed79989aa67f04be16a5fd305ca858e163d3c1467a013717b/Pillow-9.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:765cb54c0b8724a7c12c55146ae4647e0274a839fb6de7bcba841e04298e1011"}, + {url = "https://files.pythonhosted.org/packages/74/11/0545b9a88e11bdb38f3fccc63de9c445ea6f4c521c69dab7c538db905068/Pillow-9.4.0-cp38-cp38-win32.whl", hash = "sha256:df41112ccce5d47770a0c13651479fbcd8793f34232a2dd9faeccb75eb5d0d0d"}, + {url = "https://files.pythonhosted.org/packages/77/ba/2f29a6b7224b3e81ddb4d755c66d311d7f3e7c97e40a7f6ccb628b118633/Pillow-9.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:a96e6e23f2b79433390273eaf8cc94fec9c6370842e577ab10dabdcc7ea0a66b"}, + {url = "https://files.pythonhosted.org/packages/78/19/a3688ff601b8ed7d7edd303cd6cc9b5b69cf2305a43752cf185e6f96521c/Pillow-9.4.0-1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:b8c2f6eb0df979ee99433d8b3f6d193d9590f735cf12274c108bd954e30ca858"}, + {url = "https://files.pythonhosted.org/packages/7a/a2/258bc097dd133c66e68f4baa1891a5884fc2d4b8e78092c83635fac16426/Pillow-9.4.0-1-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b70756ec9417c34e097f987b4d8c510975216ad26ba6e57ccb53bc758f490dab"}, + {url = "https://files.pythonhosted.org/packages/7b/d7/3034e0961b19ce2a0e80951918e81939dfff1b635575be28a09348b7d032/Pillow-9.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:0e51f608da093e5d9038c592b5b575cadc12fd748af1479b5e858045fff955a9"}, + {url = "https://files.pythonhosted.org/packages/7c/4b/96aae1deb7f6fd30995e22560263ab1d71728a7880dab109824fc37754de/Pillow-9.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:eaef5d2de3c7e9b21f1e762f289d17b726c2239a42b11e25446abf82b26ac132"}, + {url = "https://files.pythonhosted.org/packages/82/1d/1253394355be9d8ac159dbb4b84265d86d7cc2a74659c73d586c2e1d31a4/Pillow-9.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:a2e0f87144fcbbe54297cae708c5e7f9da21a4646523456b00cc956bd4c65815"}, + {url = "https://files.pythonhosted.org/packages/83/b1/6f2c58d37a4da33d1b72726303adc335d4cd7ecbee262e84b4d3b28bfe70/Pillow-9.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:e1339790c083c5a4de48f688b4841f18df839eb3c9584a770cbd818b33e26d5d"}, + {url = "https://files.pythonhosted.org/packages/88/ae/2f554e2b2780467211c5a92a3b2f8fb0acd38d4b09ca6ba4bc4cdc1b9f9c/Pillow-9.4.0-1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:3f4cc516e0b264c8d4ccd6b6cbc69a07c6d582d8337df79be1e15a5056b258c9"}, + {url = "https://files.pythonhosted.org/packages/8c/a3/f096c4199c0af6d205a9cf1f3440581614016d9cfcab3a4091ecd5d1e26b/Pillow-9.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:7a21222644ab69ddd9967cfe6f2bb420b460dae4289c9d40ff9a4896e7c35c9a"}, + {url = "https://files.pythonhosted.org/packages/91/1d/57a09a69508a27c1c6caa4197ce7fac5be5b7d736889ba1a20931ff4efca/Pillow-9.4.0-1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:f0caf4a5dcf610d96c3bd32932bfac8aee61c96e60481c2a0ea58da435e25acd"}, + {url = "https://files.pythonhosted.org/packages/95/d2/d444a3a1751874210ff3dd792dc2f27f2052be2a3e5386ddaab4751a7171/Pillow-9.4.0-2-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:013016af6b3a12a2f40b704677f8b51f72cb007dac785a9933d5c86a72a7fe33"}, + {url = "https://files.pythonhosted.org/packages/99/d1/4a4f29204e34a0d253ee0f371930c37ba288ecef652f7f49cb6b4602f13b/Pillow-9.4.0-1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b4b4e9dda4f4e4c4e6896f93e84a8f0bcca3b059de9ddf67dac3c334b1195e1"}, + {url = "https://files.pythonhosted.org/packages/9c/9f/0e5a602fdb6adcc594b1aec4dd7d6162b2540cd5a6ae874871e061a45c52/Pillow-9.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:19005a8e58b7c1796bc0167862b1f54a64d3b44ee5d48152b06bb861458bc0f8"}, + {url = "https://files.pythonhosted.org/packages/9e/73/4aacfaeee07328835131683c27d8246b50b10260ff30982e5d988d04e06f/Pillow-9.4.0-2-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9e5f94742033898bfe84c93c831a6f552bb629448d4072dd312306bab3bd96f1"}, + {url = "https://files.pythonhosted.org/packages/9e/91/f0ae261eaa8e06550e89c169176fbca209b9fc74014581956cd0ffc705ee/Pillow-9.4.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:28676836c7796805914b76b1837a40f76827ee0d5398f72f7dcc634bae7c6264"}, + {url = "https://files.pythonhosted.org/packages/a2/93/f0d2b2c403cccc1e7f06a2f02cb4b7099cf3a420e0392b6b8496cf0b9c4d/Pillow-9.4.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef21af928e807f10bf4141cad4746eee692a0dd3ff56cfb25fce076ec3cc8abe"}, + {url = "https://files.pythonhosted.org/packages/a2/a2/0e323e6098b3a0a61fb09a61a38dfdb107b2d2df68c437320b8014565983/Pillow-9.4.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e9d7747847c53a16a729b6ee5e737cf170f7a16611c143d95aa60a109a59c336"}, + {url = "https://files.pythonhosted.org/packages/ad/b5/58378730355a42bc504f4a10ef9526e59ce4c8a1bb612a0289a407e2ce79/Pillow-9.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f715c32e774a60a337b2bb8ad9839b4abf75b267a0f18806f6f4f5f1688c4b5a"}, + {url = "https://files.pythonhosted.org/packages/af/29/6d8f5bb2b9559144beeeece33732e5214046a918fbd50ab79c94b2ad07ec/Pillow-9.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:53dcb50fbdc3fb2c55431a9b30caeb2f7027fcd2aeb501459464f0214200a503"}, + {url = "https://files.pythonhosted.org/packages/b7/60/ca708f98a78a530ecc1c1d517cd220ad1c4ff2540b271a3ea7fcc30a6cd0/Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", 
hash = "sha256:e621b0246192d3b9cb1dc62c78cfa4c6f6d2ddc0ec207d43c0dedecb914f152a"}, + {url = "https://files.pythonhosted.org/packages/b9/17/3f093fcd26c0468fd2b55661461e1a2f1d5429974b888d3164a0fda28b46/Pillow-9.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cb362e3b0976dc994857391b776ddaa8c13c28a16f80ac6522c23d5257156bed"}, + {url = "https://files.pythonhosted.org/packages/b9/ee/88978534a2304540a938316fc3241d2e3a2d8b68834485b1ffce0d7f38e9/Pillow-9.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:451f10ef963918e65b8869e17d67db5e2f4ab40e716ee6ce7129b0cde2876eab"}, + {url = "https://files.pythonhosted.org/packages/ba/8d/ce6327813af015d4e0c05350899b0a7f37156e9d0ae50d57a3aecb6602df/Pillow-9.4.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b52ff4f4e002f828ea6483faf4c4e8deea8d743cf801b74910243c58acc6eda3"}, + {url = "https://files.pythonhosted.org/packages/bc/07/830784e061fb94d67649f3e438ff63cfb902dec6d48ac75aeaaac7c7c30e/Pillow-9.4.0.tar.gz", hash = "sha256:a1c2d7780448eb93fbcc3789bf3916aa5720d942e37945f4056680317f1cd23e"}, + {url = "https://files.pythonhosted.org/packages/c8/08/8387076780f6ed6b6071d43031a53531b260efde6e4404d3399e2a8dd29a/Pillow-9.4.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b9b752ab91e78234941e44abdecc07f1f0d8f51fb62941d32995b8161f68cfe5"}, + {url = "https://files.pythonhosted.org/packages/ca/bd/29b8d1d5542402d9fed6f9cf554faeedc57655c4626aa6f93079d55cb6a5/Pillow-9.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dd5a9c3091a0f414a963d427f920368e2b6a4c2f7527fdd82cde8ef0bc7a327"}, + {url = "https://files.pythonhosted.org/packages/cd/2c/cd096a46f8e1d9110597b21079fdba8eb2148357e0ab6252562ed5904f5a/Pillow-9.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6c6b1389ed66cdd174d040105123a5a1bc91d0aa7059c7261d20e583b6d8cbd2"}, + {url = "https://files.pythonhosted.org/packages/cf/ae/b20344b540ed6a9f38b8bf6444cc102dd4ae3855ba44ddcb092286843b2b/Pillow-9.4.0-2-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:99d92d148dd03fd19d16175b6d355cc1b01faf80dae93c6c3eb4163709edc0a9"}, + {url = "https://files.pythonhosted.org/packages/d3/30/72c6e2eb69156eb6cb926c58d9642bd823d47b621e76a1a1ab97411e9c27/Pillow-9.4.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:653d7fb2df65efefbcbf81ef5fe5e5be931f1ee4332c2893ca638c9b11a409c4"}, + {url = "https://files.pythonhosted.org/packages/dc/8a/ee6c0ecdf39a5674881a9ea82b488751be6feb7723b62c7df64229d60f85/Pillow-9.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:847b114580c5cc9ebaf216dd8c8dbc6b00a3b7ab0131e173d7120e6deade1f57"}, + {url = "https://files.pythonhosted.org/packages/dd/c2/c8ebe8cc6dba0ef953f0c0c272847a08b1dfde4219c056a0cab0768f8eeb/Pillow-9.4.0-cp39-cp39-win32.whl", hash = "sha256:6d9dfb9959a3b0039ee06c1a1a90dc23bac3b430842dcb97908ddde05870601c"}, + {url = "https://files.pythonhosted.org/packages/de/e2/d1dda94185dba4fc019744076e52e2c6b450620938b2ded7b31ba90bd559/Pillow-9.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:e8c5cf126889a4de385c02a2c3d3aba4b00f70234bfddae82a5eaa3ee6d5e3e6"}, + {url = "https://files.pythonhosted.org/packages/e8/b1/55617e272040129919077e403996375fcdfb4f5f5b8c24a7c4e92fb8b17b/Pillow-9.4.0-2-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:9d9a62576b68cd90f7075876f4e8444487db5eeea0e4df3ba298ee38a8d067b0"}, + {url = "https://files.pythonhosted.org/packages/e8/cd/6dbd1286a28a074dd8c47583c2224617c0283e69749a6cea45e084d99c8a/Pillow-9.4.0-2-cp38-cp38-macosx_10_10_x86_64.whl", 
hash = "sha256:83125753a60cfc8c412de5896d10a0a405e0bd88d0470ad82e0869ddf0cb3848"}, + {url = "https://files.pythonhosted.org/packages/eb/7c/c3b1a932f4d832429b961aaae8d378c877e00b3d0accf50c5df97c595f35/Pillow-9.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:3fa1284762aacca6dc97474ee9c16f83990b8eeb6697f2ba17140d54b453e133"}, + {url = "https://files.pythonhosted.org/packages/ed/cc/a3b981073b62636aad3d6a1c846bd5a703e0a46a61ecef8ab552c432725d/Pillow-9.4.0-2-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:87708d78a14d56a990fbf4f9cb350b7d89ee8988705e58e39bdf4d82c149210f"}, + {url = "https://files.pythonhosted.org/packages/f2/cc/71b11ec996744b704637d9ef53ff924b7d208c41be1d251cca33991f6833/Pillow-9.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0884ba7b515163a1a05440a138adeb722b8a6ae2c2b33aea93ea3118dd3a899e"}, + {url = "https://files.pythonhosted.org/packages/f6/a7/a47d0d461992b1612e836d23b912d22b6795df8413e04719044ea11ecc87/Pillow-9.4.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:5f532a2ad4d174eb73494e7397988e22bf427f91acc8e6ebf5bb10597b49c493"}, + {url = "https://files.pythonhosted.org/packages/fb/18/4752328a96388365e6864b9ba3d3489c8a3d1cef9648267583b03a5f6b8d/Pillow-9.4.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6abfb51a82e919e3933eb137e17c4ae9c0475a25508ea88993bb59faf82f3b35"}, + {url = "https://files.pythonhosted.org/packages/fb/69/a4f510dfd14a17adcbe1b8b238dbba6a4a31de78d75f0d6428735432ee0a/Pillow-9.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:519e14e2c49fcf7616d6d2cfc5c70adae95682ae20f0395e9280db85e8d6c4df"}, + {url = "https://files.pythonhosted.org/packages/fd/41/6e44769918a4a2f5294a19bbbf12f58138fcb0c1c3df4721bc5fe1c6f3bf/Pillow-9.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:54614444887e0d3043557d9dbc697dbb16cfb5a35d672b7a0fcc1ed0cf1c600b"}, +] +"pluggy 1.0.0" = [ + {url = "https://files.pythonhosted.org/packages/9e/01/f38e2ff29715251cf25532b9082a1589ab7e4f571ced434f98d0139336dc/pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, + {url = "https://files.pythonhosted.org/packages/a1/16/db2d7de3474b6e37cbb9c008965ee63835bba517e22cdb8c35b5116b5ce1/pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, +] +"pooch 1.6.0" = [ + {url = "https://files.pythonhosted.org/packages/0c/48/de6235d3a568156a8daf6c6d21c09ffcd3b9e0cbf4ad2cc4d34ff80527bb/pooch-1.6.0.tar.gz", hash = "sha256:57d20ec4b10dd694d2b05bb64bc6b109c6e85a6c1405794ce87ed8b341ab3f44"}, + {url = "https://files.pythonhosted.org/packages/8d/64/8e1bfeda3ba0f267b2d9a918e8ca51db8652d0e1a3412a5b3dbce85d90b6/pooch-1.6.0-py3-none-any.whl", hash = "sha256:3bf0e20027096836b8dbce0152dbb785a269abeb621618eb4bdd275ff1e23c9c"}, +] +"pycparser 2.21" = [ + {url = "https://files.pythonhosted.org/packages/5e/0b/95d387f5f4433cb0f53ff7ad859bd2c6051051cebbb564f139a999ab46de/pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, + {url = "https://files.pythonhosted.org/packages/62/d5/5f610ebe421e85889f2e55e33b7f9a6795bd982198517d912eb1c76e1a53/pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, +] +"pyparsing 3.0.9" = [ + {url = "https://files.pythonhosted.org/packages/6c/10/a7d0fa5baea8fe7b50f448ab742f26f52b80bfca85ac2be9d35cdd9a3246/pyparsing-3.0.9-py3-none-any.whl", hash = 
"sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"}, + {url = "https://files.pythonhosted.org/packages/71/22/207523d16464c40a0310d2d4d8926daffa00ac1f5b1576170a32db749636/pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"}, +] +"pytest 7.2.2" = [ + {url = "https://files.pythonhosted.org/packages/b2/68/5321b5793bd506961bd40bdbdd0674e7de4fb873ee7cab33dd27283ad513/pytest-7.2.2-py3-none-any.whl", hash = "sha256:130328f552dcfac0b1cec75c12e3f005619dc5f874f0a06e8ff7263f0ee6225e"}, + {url = "https://files.pythonhosted.org/packages/b9/29/311895d9cd3f003dd58e8fdea36dd895ba2da5c0c90601836f7de79f76fe/pytest-7.2.2.tar.gz", hash = "sha256:c99ab0c73aceb050f68929bc93af19ab6db0558791c6a0715723abe9d0ade9d4"}, +] +"python-dateutil 2.8.2" = [ + {url = "https://files.pythonhosted.org/packages/36/7a/87837f39d0296e723bb9b62bbb257d0355c7f6128853c78955f57342a56d/python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, + {url = "https://files.pythonhosted.org/packages/4c/c4/13b4776ea2d76c115c1d1b84579f3764ee6d57204f6be27119f13a61d0a9/python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, +] +"pytz 2023.3" = [ + {url = "https://files.pythonhosted.org/packages/5e/32/12032aa8c673ee16707a9b6cdda2b09c0089131f35af55d443b6a9c69c1d/pytz-2023.3.tar.gz", hash = "sha256:1d8ce29db189191fb55338ee6d0387d82ab59f3d00eac103412d64e0ebd0c588"}, + {url = "https://files.pythonhosted.org/packages/7f/99/ad6bd37e748257dd70d6f85d916cafe79c0b0f5e2e95b11f7fbc82bf3110/pytz-2023.3-py2.py3-none-any.whl", hash = "sha256:a151b3abb88eda1d4e34a9814df37de2a80e301e68ba0fd856fb9b46bfbbbffb"}, +] +"requests 2.28.2" = [ + {url = "https://files.pythonhosted.org/packages/9d/ee/391076f5937f0a8cdf5e53b701ffc91753e87b07d66bae4a09aa671897bf/requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"}, + {url = "https://files.pythonhosted.org/packages/d2/f4/274d1dbe96b41cf4e0efb70cbced278ffd61b5c7bb70338b62af94ccb25b/requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"}, +] +"scikit-learn 1.2.2" = [ + {url = "https://files.pythonhosted.org/packages/17/13/d4142c9105507ba363d9f3602941b7baf79763cc17e73fa9be826ba3aa89/scikit_learn-1.2.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9c710ff9f9936ba8a3b74a455ccf0dcf59b230caa1e9ba0223773c490cab1e51"}, + {url = "https://files.pythonhosted.org/packages/27/4a/1afe473760b07663710a75437b795ef37362aebb8bf513ff3bbf78fbd0c6/scikit_learn-1.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:dfeaf8be72117eb61a164ea6fc8afb6dfe08c6f90365bde2dc16456e4bc8e45f"}, + {url = "https://files.pythonhosted.org/packages/2f/fd/9fcbe7fe94150e72d87120cbc462bde1971c3674e726b81f4a4c4fdfa8e1/scikit_learn-1.2.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:fe0aa1a7029ed3e1dcbf4a5bc675aa3b1bc468d9012ecf6c6f081251ca47f590"}, + {url = "https://files.pythonhosted.org/packages/39/85/95298f12ec1ed756938edafe9f15498109ec8dbfc833ae492ae1cc333933/scikit_learn-1.2.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:fe175ee1dab589d2e1033657c5b6bec92a8a3b69103e3dd361b58014729975c3"}, + {url = "https://files.pythonhosted.org/packages/3a/9c/7e26446b45192186c63bf6e9fc50a4834b8c9d85a719e06d60a244ded6f3/scikit_learn-1.2.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:44b47a305190c28dd8dd73fc9445f802b6ea716669cfc22ab1eb97b335d238b1"}, + {url = "https://files.pythonhosted.org/packages/3c/21/ee21352f69a980614cb4193d68a64a83aa2c0f80183c9485d6d61821a922/scikit_learn-1.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99cc01184e347de485bf253d19fcb3b1a3fb0ee4cea5ee3c43ec0cc429b6d29f"}, + {url = "https://files.pythonhosted.org/packages/48/92/a39d1c9e0a6cb5ed4112899ecca590138484356ba8c4274dde6c3893ff14/scikit_learn-1.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fe83b676f407f00afa388dd1fdd49e5c6612e551ed84f3b1b182858f09e987d"}, + {url = "https://files.pythonhosted.org/packages/4c/64/a1e6e92b850b39200c82e3bc54d556b2c634b3904c39ac5cdb10b1c5765f/scikit_learn-1.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf036ea7ef66115e0d49655f16febfa547886deba20149555a41d28f56fd6d3c"}, + {url = "https://files.pythonhosted.org/packages/4f/6b/a204ee49e2d4dec62b38394adbdc7672e9a9df9f359a80705a07a46cace6/scikit_learn-1.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d5312d9674bed14f73773d2acf15a3272639b981e60b72c9b190a0cffed5bad"}, + {url = "https://files.pythonhosted.org/packages/51/b6/d9a414b6579c4ec5703cebc0fe7b7b6821344190bffa3d46a1a23efd173a/scikit_learn-1.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:6477eed40dbce190f9f9e9d0d37e020815825b300121307942ec2110302b66a3"}, + {url = "https://files.pythonhosted.org/packages/51/d1/58faa69e97ee60e99dcdde5df43f17f0887eda5de9eafb6534a51b63d791/scikit_learn-1.2.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:953236889928d104c2ef14027539f5f2609a47ebf716b8cbe4437e85dce42744"}, + {url = "https://files.pythonhosted.org/packages/5a/43/5c4d21217df6a033999ee531fdfd52809263727b4afb26f7196a8ec709ae/scikit_learn-1.2.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:e6e574db9914afcb4e11ade84fab084536a895ca60aadea3041e85b8ac963edb"}, + {url = "https://files.pythonhosted.org/packages/5b/fb/478a0460ae2843dd2fc7a7f9ddcd8bb033ae21eb968df6a8cbe8094a28bc/scikit_learn-1.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:7f69313884e8eb311460cc2f28676d5e400bd929841a2c8eb8742ae78ebf7c20"}, + {url = "https://files.pythonhosted.org/packages/72/aa/a97b6ae8fc4ce0e1b3837b3613b0563ce843eb34cf4089fb41d613dee957/scikit_learn-1.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8156db41e1c39c69aa2d8599ab7577af53e9e5e7a57b0504e116cc73c39138dd"}, + {url = "https://files.pythonhosted.org/packages/81/84/756be2b975959a5f94124d5584ead75d7ca99184f2d16664a0157b274b9a/scikit_learn-1.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea061bf0283bf9a9f36ea3c5d3231ba2176221bbd430abd2603b1c3b2ed85c89"}, + {url = "https://files.pythonhosted.org/packages/ae/a8/829ef05dbeb9aa4436190ea00c8db6d59a39751b45e17932221d27fe9e51/scikit_learn-1.2.2-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:2dd3ffd3950e3d6c0c0ef9033a9b9b32d910c61bd06cb8206303fb4514b88a49"}, + {url = "https://files.pythonhosted.org/packages/c9/fa/8e158d81e3602da1e7bafbd4987938bc003fe4b0f44d65681e7f8face95a/scikit-learn-1.2.2.tar.gz", hash = "sha256:8429aea30ec24e7a8c7ed8a3fa6213adf3814a6efbea09e16e0a0c71e1a1a3d7"}, + {url = "https://files.pythonhosted.org/packages/d7/8a/301594a8bb1cfeeb95dd86aa7dfedd31e93211940105429abddf0933cfff/scikit_learn-1.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:065e9673e24e0dc5113e2dd2b4ca30c9d8aa2fa90f4c0597241c93b63130d233"}, + {url = 
"https://files.pythonhosted.org/packages/db/98/169b46a84b48f92df2b5e163fce75d471f4df933f8b3d925a61133210776/scikit_learn-1.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:8b0670d4224a3c2d596fd572fb4fa673b2a0ccfb07152688ebd2ea0b8c61025c"}, + {url = "https://files.pythonhosted.org/packages/f4/4d/fe3b35e18407da4b386be58616bd0f941ea1762a6c6798267f3aa64ef5d5/scikit_learn-1.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:ad66c3848c0a1ec13464b2a95d0a484fd5b02ce74268eaa7e0c697b904f31d6c"}, + {url = "https://files.pythonhosted.org/packages/fa/1e/36d7609e84b50d4a2e5bc43cd5013d9ea885799e5813a1e9cf5bb1afd3f4/scikit_learn-1.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e2642baa0ad1e8f8188917423dd73994bf25429f8893ddbe115be3ca3183584"}, +] +"scipy 1.9.3" = [ + {url = "https://files.pythonhosted.org/packages/0a/2e/44795c6398e24e45fa0bb61c3e98de1cfea567b1b51efd3751e2f7ff9720/scipy-1.9.3.tar.gz", hash = "sha256:fbc5c05c85c1a02be77b1ff591087c83bc44579c6d2bd9fb798bb64ea5e1a027"}, + {url = "https://files.pythonhosted.org/packages/40/0e/3ff193b6ba6a0a6f13f8d367e8976370232e769bd609c8c11d86e0353adf/scipy-1.9.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:83b89e9586c62e787f5012e8475fbb12185bafb996a03257e9675cd73d3736dd"}, + {url = "https://files.pythonhosted.org/packages/42/14/d2500818b7bb7b862d70c1ae97e646a4795b068583c67720553764095024/scipy-1.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:2318bef588acc7a574f5bfdff9c172d0b1bf2c8143d9582e05f878e580a3781e"}, + {url = "https://files.pythonhosted.org/packages/42/81/0a64d2204c3b261380ac96c6d61f018528108b62c0e21e6153a58cebf4f6/scipy-1.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:06d2e1b4c491dc7d8eacea139a1b0b295f74e1a1a0f704c375028f8320d16e31"}, + {url = "https://files.pythonhosted.org/packages/44/8a/bae77e624391b27aeea2d33a02f2ce4a8019f1378ce92faf5780f1521f2e/scipy-1.9.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:545c83ffb518094d8c9d83cce216c0c32f8c04aaf28b92cc8283eda0685162d5"}, + {url = "https://files.pythonhosted.org/packages/56/af/6a2b90fe280e89466d84747054667f74b84a8304f75931a173090919991f/scipy-1.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cff3a5295234037e39500d35316a4c5794739433528310e117b8a9a0c76d20fc"}, + {url = "https://files.pythonhosted.org/packages/59/0b/8a9acfc5c36bbf6e18d02f3a08db5b83bebba510be2df3230f53852c74a4/scipy-1.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d01e1dd7b15bd2449c8bfc6b7cc67d630700ed655654f0dfcf121600bad205c9"}, + {url = "https://files.pythonhosted.org/packages/59/ef/d54d17c36b46a9b8f6e1d4bf039b7f7ad236504cfb13cf1872caec9cbeaa/scipy-1.9.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d644a64e174c16cb4b2e41dfea6af722053e83d066da7343f333a54dae9bc31c"}, + {url = "https://files.pythonhosted.org/packages/84/86/4f38fa30c112c3590954420f85d95b8cd23811ecc5cfc4bfd4d988d4db44/scipy-1.9.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5a04cd7d0d3eff6ea4719371cbc44df31411862b9646db617c99718ff68d4840"}, + {url = "https://files.pythonhosted.org/packages/92/f9/7ae2c1ae200212bc84b5a8369a10d644aa8b588140fe292d59db3b4a2545/scipy-1.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abaf921531b5aeaafced90157db505e10345e45038c39e5d9b6c7922d68085cb"}, + {url = "https://files.pythonhosted.org/packages/b5/67/c5451465ec94e654e6315cd5136961d267ae94a0f799b85d26eb9efe4c9f/scipy-1.9.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:4db5b30849606a95dcf519763dd3ab6fe9bd91df49eba517359e450a7d80ce2e"}, + {url = "https://files.pythonhosted.org/packages/bb/b7/380c9e4cd71263f03d16f8a92c0e44c9bdef38777e1a7dde1f47ba996bac/scipy-1.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c68db6b290cbd4049012990d7fe71a2abd9ffbe82c0056ebe0f01df8be5436b0"}, + {url = "https://files.pythonhosted.org/packages/c3/3e/e40c52775a5d19abd43b1c245fbc5dee283a29acc45c830bc73bfad9468b/scipy-1.9.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:90453d2b93ea82a9f434e4e1cba043e779ff67b92f7a0e85d05d286a3625df3c"}, + {url = "https://files.pythonhosted.org/packages/c8/0f/d9f8c50be8670b7ba6f002679e84cd18f46a23faf62c1590f4d1bbec0c8c/scipy-1.9.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:da8245491d73ed0a994ed9c2e380fd058ce2fa8a18da204681f2fe1f57f98f95"}, + {url = "https://files.pythonhosted.org/packages/ce/28/635391e72e24bd3f4a91e374f4a186a5e4ecc95f23d8a55c9b0d25777cf7/scipy-1.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a72d885fa44247f92743fc20732ae55564ff2a519e8302fb7e18717c5355a8b"}, + {url = "https://files.pythonhosted.org/packages/cf/0e/3f1685c1fcb5dfe35ec027a5fc7a29e8818c61b2cc7fa207b4fc7b959f52/scipy-1.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:68239b6aa6f9c593da8be1509a05cb7f9efe98b80f43a5861cd24c7557e98523"}, + {url = "https://files.pythonhosted.org/packages/d0/96/4f6eac3fea18f836a0e403539556b1684e6f3361fa39aa5d5797dedecd75/scipy-1.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:5b88e6d91ad9d59478fafe92a7c757d00c59e3bdc3331be8ada76a4f8d683f58"}, + {url = "https://files.pythonhosted.org/packages/df/75/c0254dc58d1f1b00f9d3dbda029743b71b815dd512461ed20d9b7f459e37/scipy-1.9.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b41bc822679ad1c9a5f023bc93f6d0543129ca0f37c1ce294dd9d386f0a21096"}, + {url = "https://files.pythonhosted.org/packages/f4/9d/882134b1e774a9227ab855c71a39612194e1106185595417ce92f0f1e78c/scipy-1.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d54222d7a3ba6022fdf5773931b5d7c56efe41ede7f7128c7b1637700409108"}, + {url = "https://files.pythonhosted.org/packages/f9/37/5cd44af74d7178a44452b17ea162bc93996d5555b4a978877d2efd56fe84/scipy-1.9.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83c06e62a390a9167da60bedd4575a14c1f58ca9dfde59830fc42e5197283dab"}, + {url = "https://files.pythonhosted.org/packages/fb/ba/1733dbbc19f2aa07d100cfa220bcc83a3977bc5c9f0a5ad262dae1f3ab90/scipy-1.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1884b66a54887e21addf9c16fb588720a8309a57b2e258ae1c7986d4444d3bc0"}, +] +"setuptools 67.6.1" = [ + {url = "https://files.pythonhosted.org/packages/0b/fc/8781442def77b0aa22f63f266d4dadd486ebc0c5371d6290caf4320da4b7/setuptools-67.6.1-py3-none-any.whl", hash = "sha256:e728ca814a823bf7bf60162daf9db95b93d532948c4c0bea762ce62f60189078"}, + {url = "https://files.pythonhosted.org/packages/cb/46/22ec35f286a77e6b94adf81b4f0d59f402ed981d4251df0ba7b992299146/setuptools-67.6.1.tar.gz", hash = "sha256:257de92a9d50a60b8e22abfcbb771571fde0dbf3ec234463212027a4eeecbe9a"}, +] +"six 1.16.0" = [ + {url = "https://files.pythonhosted.org/packages/71/39/171f1c67cd00715f190ba0b100d606d440a28c93c7714febeca8b79af85e/six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, + {url = "https://files.pythonhosted.org/packages/d9/5a/e7c31adbe875f2abbb91bd84cf2dc52d792b5a01506781dbcf25c91daf11/six-1.16.0-py2.py3-none-any.whl", hash = 
"sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, +] +"soundfile 0.12.1" = [ + {url = "https://files.pythonhosted.org/packages/03/0f/49941ed8a2d94e5b36ea94346fb1d2b22e847fede902e05be4c96f26be7d/soundfile-0.12.1-py2.py3-none-win32.whl", hash = "sha256:59dfd88c79b48f441bbf6994142a19ab1de3b9bb7c12863402c2bc621e49091a"}, + {url = "https://files.pythonhosted.org/packages/04/bc/cd845c2dbb4d257c744cd58a5bcdd9f6d235ca317e7e22e49564ec88dcd9/soundfile-0.12.1-py2.py3-none-any.whl", hash = "sha256:828a79c2e75abab5359f780c81dccd4953c45a2c4cd4f05ba3e233ddf984b882"}, + {url = "https://files.pythonhosted.org/packages/50/ff/26a4ee48d0b66625a4e4028a055b9f25bc9d7c7b2d17d21a45137621a50d/soundfile-0.12.1-py2.py3-none-win_amd64.whl", hash = "sha256:0d86924c00b62552b650ddd28af426e3ff2d4dc2e9047dae5b3d8452e0a49a77"}, + {url = "https://files.pythonhosted.org/packages/6f/96/5ff33900998bad58d5381fd1acfcdac11cbea4f08fc72ac1dc25ffb13f6a/soundfile-0.12.1.tar.gz", hash = "sha256:e8e1017b2cf1dda767aef19d2fd9ee5ebe07e050d430f77a0a7c66ba08b8cdae"}, + {url = "https://files.pythonhosted.org/packages/71/87/31d2b9ed58975cec081858c01afaa3c43718eb0f62b5698a876d94739ad0/soundfile-0.12.1-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:bceaab5c4febb11ea0554566784bcf4bc2e3977b53946dda2b12804b4fe524a8"}, + {url = "https://files.pythonhosted.org/packages/ad/bd/0602167a213d9184fc688b1086dc6d374b7ae8c33eccf169f9b50ce6568c/soundfile-0.12.1-py2.py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:2dc3685bed7187c072a46ab4ffddd38cef7de9ae5eb05c03df2ad569cf4dacbc"}, + {url = "https://files.pythonhosted.org/packages/c1/07/7591f4efd29e65071c3a61b53725036ea8f73366a4920a481ebddaf8d0ca/soundfile-0.12.1-py2.py3-none-manylinux_2_31_x86_64.whl", hash = "sha256:074247b771a181859d2bc1f98b5ebf6d5153d2c397b86ee9e29ba602a8dfe2a6"}, + {url = "https://files.pythonhosted.org/packages/c8/73/059c84343be6509b480013bf1eeb11b96c5f9eb48deff8f83638011f6b2c/soundfile-0.12.1-py2.py3-none-macosx_10_9_x86_64.whl", hash = "sha256:d922be1563ce17a69582a352a86f28ed8c9f6a8bc951df63476ffc310c064bfa"}, +] +"soxr 0.3.4" = [ + {url = "https://files.pythonhosted.org/packages/04/8d/adf526c43f535372058b33daa9b5b7868e2d3023b25121713fc12686f223/soxr-0.3.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fe8b5f92c802f1e7793c40344f5368dc6163718c9ffa82e79ee6ad779d318ac5"}, + {url = "https://files.pythonhosted.org/packages/11/bc/4fe9e8f991bb2ec5900df39e321a5077cb7117e993725e01214229f2af10/soxr-0.3.4-cp310-cp310-win_amd64.whl", hash = "sha256:1e95c96ce94524fae453b4331c9910d33f97506f99bae06d76a9c0649710619e"}, + {url = "https://files.pythonhosted.org/packages/1e/9b/7b82a148ce4b9fff08daf9ed0cd729d62ea3317ae8bbc81574cc37f1dde6/soxr-0.3.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7e23de4dfe54ac30e583bbc9cc3feda1cd776fedce13206bc4b3115b75ecab82"}, + {url = "https://files.pythonhosted.org/packages/21/90/6c6185c2b0a2f8340dc1e7f5a98750a0dd8134fcfd9f68d9bf0bdaf923b3/soxr-0.3.4-cp310-cp310-win32.whl", hash = "sha256:182c02a7ba45a159a0dbb0a297335df2381ead03a65377b19663ea0ff720ecb7"}, + {url = "https://files.pythonhosted.org/packages/47/7f/93259c77f5f6b30356f277781ec83ebf953cd4f90b2be87e09ba477cd095/soxr-0.3.4-cp311-cp311-win_amd64.whl", hash = "sha256:7c8350acd7150f74261a0569b47ccb1bb4aa39b2d575860bc97cfa69aab8aead"}, + {url = "https://files.pythonhosted.org/packages/4a/a3/89ceb80e0dcf10378ee1433029ec1a4e5b514b22ad30e3f4c7a073dd2685/soxr-0.3.4-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:380d2d43871a68e8b1ef1702a0abe6f9e48ddb3933c7a303c45d67e121503e7c"}, + {url = "https://files.pythonhosted.org/packages/4e/21/e3e90fdfbb4316a63a1edf884da198cc2002df4611e0a527efae4bf5fe33/soxr-0.3.4-cp39-cp39-win32.whl", hash = "sha256:83de825d6a713c7b2e76d9ec3f229a58a9ed290237e7adc05d80e8b39be995a6"}, + {url = "https://files.pythonhosted.org/packages/52/a6/4811e2c1564ae628d5915382669c7b16d3428586c288ce24e8e5c4f7a637/soxr-0.3.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:20130329985f9767c8417bbd125fe138790a71802b000481c386a800e2ad2bca"}, + {url = "https://files.pythonhosted.org/packages/52/e4/f747b38e61be13501aed78a3940cddee4fbba616222424ee89678ae3d9ca/soxr-0.3.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b7b84126643c063d5daa203f7f9137e21734dabbd7e68c097607b2ef457e2f2e"}, + {url = "https://files.pythonhosted.org/packages/53/64/de52356e0d0c33536d8e0f84a01178add23de46aabce02d8e3e17b166327/soxr-0.3.4-cp311-cp311-win32.whl", hash = "sha256:e57e9703c2bff834cabc06800d3c11a259544891d2c24a78949f3cf2f5492cc5"}, + {url = "https://files.pythonhosted.org/packages/53/ae/14b702e2e50429ec28a536c31fa57d18754b1e1f400cc36fdb3cc4b25dbf/soxr-0.3.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bb6d4dc807d04c536674429e2b05ae08a1efac9815c4595e41ffd6b57c2c662"}, + {url = "https://files.pythonhosted.org/packages/5d/86/fe792cfd2570991211709974212baaa8f73df62227b97afb728e650d1428/soxr-0.3.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:a680bab57adae462cdc86abcc7330beb5daa3ba5101165583eedcda88b7ba551"}, + {url = "https://files.pythonhosted.org/packages/66/74/83a7c5976e462be5b8dd3908c906b8ae979c7c0637fe517c33fe9d49ffc0/soxr-0.3.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:11bd1396052049e6d389225a0e96a9df15f706da501c619b35d3c72ac6bc7257"}, + {url = "https://files.pythonhosted.org/packages/6d/1f/1b74c11ea2e0c80c210136bdd95a6149962b7d4a2bd6d8b4dffd99fa276e/soxr-0.3.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84e590e75b7e5dca12bf68bfb090276f34a88fbcd793781c62d47f5d7dbe525e"}, + {url = "https://files.pythonhosted.org/packages/6d/ba/ea014aed6162ec943eb7be0de28f0a8c3660d30306e76edbe952b41fd368/soxr-0.3.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:941f7355116fe77fe6a82938fa7799a0e466a494ebc093f676969ce32b2815b1"}, + {url = "https://files.pythonhosted.org/packages/70/61/02aced07a55887f2362f7e14bfbc797f13b1f30a3f980c1df3d29edd338f/soxr-0.3.4-cp38-cp38-win32.whl", hash = "sha256:d858becbc1fcc7b38c3436d3276290fae09403cdcbdf1d5986a18dab7023a6c3"}, + {url = "https://files.pythonhosted.org/packages/74/62/ff187091de089fceabf32992815478fdb7bb1bf665c12ae373b0b9c04ff3/soxr-0.3.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff15853895b54f1b627799c6600be1ce5f7286724e7a93e4b7b9d79e5d4166f5"}, + {url = "https://files.pythonhosted.org/packages/86/7f/f2f766fc6011d0df4a15d1e73c48b790a7b5ab5be60853419533f96bca4a/soxr-0.3.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2678d2719e7496803983584d661eb5fddc7017154a8dda4a774407c56ff07973"}, + {url = "https://files.pythonhosted.org/packages/8b/80/0804aec980abceb431f895acfd42f5c8328e1a967e317ed885a8307246f1/soxr-0.3.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e613cee023b7c3f162b9da3f6b169cd7f58de345275be1fde9f19adc9cf144df"}, + {url = "https://files.pythonhosted.org/packages/92/b5/312c0db7c1ca3109895626832302beeda80e54d15b0846d1e056b09825f5/soxr-0.3.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:a3479d265574b960e12bca0878baba0862c43278915e0319d84679bb4d4fcd33"}, + {url = "https://files.pythonhosted.org/packages/b1/a0/3d0371c80952dc21d561d84c34dfafc2e7ccd30223a70ce54c024fefdcc8/soxr-0.3.4-cp39-cp39-win_amd64.whl", hash = "sha256:2082f88cae89de854c3e0d62f55d0cb31eb11764f5c2a28299121fb642a22472"}, + {url = "https://files.pythonhosted.org/packages/be/a2/634107c5ea48f4d088afaca7e9c887c80bdd4ade785d27e9f603baf64c59/soxr-0.3.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e7396498a5f5b7d8f23b656f65c24517a6ff5bdc3ee0623ccd491036a43ea08"}, + {url = "https://files.pythonhosted.org/packages/c9/90/ea881677e2c564e0eec17884a5eff5558119533f1ceb505dbd85dc3e1e5d/soxr-0.3.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0063d5f9a4e1a367084f4705301e9da131cf4d2d32aa3fe0072a1245e18088f"}, + {url = "https://files.pythonhosted.org/packages/cd/12/6e5536f3b50244ec73522fb5c3c50511fe89a2a5f4fa481e2779ed010009/soxr-0.3.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4a1b4019c9972f57612482c4f85523d6e832e3d10935e2f070a9dcd334a4dcb"}, + {url = "https://files.pythonhosted.org/packages/d3/96/ec8828162bcff97b70e99dcea82205ba93fd2dda1cbd4ba2a149d2bd7a8f/soxr-0.3.4.tar.gz", hash = "sha256:fe68daf00e8f020977b187699903d219f9e39b9fb3d915f3f923eed8ba431449"}, + {url = "https://files.pythonhosted.org/packages/e8/f5/bfcf99a10250381ed76793d930da816836f2ac8a276de48522001271cc98/soxr-0.3.4-cp38-cp38-win_amd64.whl", hash = "sha256:068ab4df549df5783cc1eb4eb6c94f53823b164dc27134fc621fc9f5097f38cd"}, + {url = "https://files.pythonhosted.org/packages/eb/ce/d5e5222b17abf581469619eafff59a3050b317c93552a4d701bb0db5f274/soxr-0.3.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:00fdbf24f64d8c3fb800425c383048cb24c32defac80901cde4a57fb6ce5d431"}, + {url = "https://files.pythonhosted.org/packages/fc/d9/ca1e4f24c4fa49552b187b10162938ae2d3ce1a1b65fd42ce4960dc36c53/soxr-0.3.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:78090e97abfb326b7cf14ef37d08a17252b07d438388dcbbd82a6836a9d551b1"}, +] +"threadpoolctl 3.1.0" = [ + {url = "https://files.pythonhosted.org/packages/1b/c7/3d85f8b3894ba7228d0c74e16e97a36a72b2cd2b0e0f8f89b5d435d11f71/threadpoolctl-3.1.0.tar.gz", hash = "sha256:a335baacfaa4400ae1f0d8e3a58d6674d2f8828e3716bb2802c44955ad391380"}, + {url = "https://files.pythonhosted.org/packages/61/cf/6e354304bcb9c6413c4e02a747b600061c21d38ba51e7e544ac7bc66aecc/threadpoolctl-3.1.0-py3-none-any.whl", hash = "sha256:8b99adda265feb6773280df41eece7b2e6561b772d21ffd52e372f999024907b"}, +] +"tomli 2.0.1" = [ + {url = "https://files.pythonhosted.org/packages/97/75/10a9ebee3fd790d20926a90a2547f0bf78f371b2f13aa822c759680ca7b9/tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {url = "https://files.pythonhosted.org/packages/c0/3f/d7af728f075fb08564c5949a9c95e44352e23dee646869fa104a3b2060a3/tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] +"torch 1.13.1" = [ + {url = "https://files.pythonhosted.org/packages/00/86/77a9eddbf46f1bca2468d16a401911f58917f95b63402d6a7a4522521e5d/torch-1.13.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:76024be052b659ac1304ab8475ab03ea0a12124c3e7626282c9c86798ac7bc11"}, + {url = "https://files.pythonhosted.org/packages/13/60/6129b3b0ce696e45774ab9b4b4f1a1bf5d74a2b9b2e81c88fb4620e6cad3/torch-1.13.1-cp38-none-macosx_10_9_x86_64.whl", hash = 
"sha256:33e67eea526e0bbb9151263e65417a9ef2d8fa53cbe628e87310060c9dcfa312"}, + {url = "https://files.pythonhosted.org/packages/19/ae/79b619e5f3abc7c3343c19d99678830369e9d87fe5ed44973e08e5b5bcaa/torch-1.13.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:ea8dda84d796094eb8709df0fcd6b56dc20b58fdd6bc4e8d7109930dafc8e419"}, + {url = "https://files.pythonhosted.org/packages/24/45/61e41ef8a84e1d6200ff10b7cb87e23e211599ab62420396a363295f973c/torch-1.13.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:0122806b111b949d21fa1a5f9764d1fd2fcc4a47cb7f8ff914204fd4fc752ed5"}, + {url = "https://files.pythonhosted.org/packages/24/dd/22d7048ba641ed3699fc2dc04eb3384db1f09b0202b64c9f9fede0243e7d/torch-1.13.1-cp37-none-macosx_11_0_arm64.whl", hash = "sha256:f402ca80b66e9fbd661ed4287d7553f7f3899d9ab54bf5c67faada1555abde28"}, + {url = "https://files.pythonhosted.org/packages/25/09/184125ce54b2d7e665f3b674a1ac5d96b442f00dd11804490e926231e36b/torch-1.13.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:50ff5e76d70074f6653d191fe4f6a42fdbe0cf942fbe2a3af0b75eaa414ac038"}, + {url = "https://files.pythonhosted.org/packages/2c/45/43233d36e8e7ec5588fa802bb098337ae73c314863190c68797287f2fbdd/torch-1.13.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:d9fe785d375f2e26a5d5eba5de91f89e6a3be5d11efb497e76705fdf93fa3c2e"}, + {url = "https://files.pythonhosted.org/packages/2d/ab/8210e877debc6e16c5f64345b08abfd667ade733329ef8b38dd06a362513/torch-1.13.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:e0df902a7c7dd6c795698532ee5970ce898672625635d885eade9976e5a04949"}, + {url = "https://files.pythonhosted.org/packages/33/bd/e174e6737daba03f8eaa7c051b9971d361022eb37b86cbe5db0b08cab00e/torch-1.13.1-cp310-cp310-win_amd64.whl", hash = "sha256:98124598cdff4c287dbf50f53fb455f0c1e3a88022b39648102957f3445e9b76"}, + {url = "https://files.pythonhosted.org/packages/56/13/841d298885ca6b48923d502528d51af00a56f93179c152d31ae88d3054cd/torch-1.13.1-cp39-cp39-win_amd64.whl", hash = "sha256:0aa46f0ac95050c604bcf9ef71da9f1172e5037fdf2ebe051962d47b123848e7"}, + {url = "https://files.pythonhosted.org/packages/5d/91/1adb7b73c7dad9fb64ed26bc9d8c060afb9541fca68d71a9c1b48377f332/torch-1.13.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:eeeb204d30fd40af6a2d80879b46a7efbe3cf43cdbeb8838dd4f3d126cc90b2b"}, + {url = "https://files.pythonhosted.org/packages/6b/0e/c640bda79e61766896fe16dfe0a3ab12b06ad50cf8814950518896dec0a5/torch-1.13.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:727dbf00e2cf858052364c0e2a496684b9cb5aa01dc8a8bc8bbb7c54502bdcdd"}, + {url = "https://files.pythonhosted.org/packages/81/58/431fd405855553af1a98091848cf97741302416b01462bbf9909d3c422b3/torch-1.13.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:fd12043868a34a8da7d490bf6db66991108b00ffbeecb034228bfcbbd4197143"}, + {url = "https://files.pythonhosted.org/packages/82/d8/0547f8a22a0c8aeb7e7e5e321892f1dcf93ea021829a99f1a25f1f535871/torch-1.13.1-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:393a6273c832e047581063fb74335ff50b4c566217019cc6ace318cd79eb0566"}, + {url = "https://files.pythonhosted.org/packages/86/08/41315a205bcd103a9698fa8afafbb73a234db8791cdb8b96d1efb10243a7/torch-1.13.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:2c3581a3fd81eb1f0f22997cddffea569fea53bafa372b2c0471db373b26aafc"}, + {url = "https://files.pythonhosted.org/packages/a6/41/122f37c99422566ea74b9cce90eb9218f5e8fb2582466da220f95842a0a0/torch-1.13.1-cp38-cp38-win_amd64.whl", hash = "sha256:5e1e722a41f52a3f26f0c4fcec227e02c6c42f7c094f32e49d4beef7d1e213ea"}, + {url = 
"https://files.pythonhosted.org/packages/b1/85/62b8da9d984ae95f6fdda707df4af6552a5cea46fde2b944223daf236524/torch-1.13.1-cp37-cp37m-win_amd64.whl", hash = "sha256:2ee7b81e9c457252bddd7d3da66fb1f619a5d12c24d7074de91c4ddafb832c93"}, + {url = "https://files.pythonhosted.org/packages/b9/18/d97cdc571b4cb90c0d3613cffb19a55ef1e48e74e0c5a6c293e97234b7d3/torch-1.13.1-cp37-none-macosx_10_9_x86_64.whl", hash = "sha256:0d9b8061048cfb78e675b9d2ea8503bfe30db43d583599ae8626b1263a0c1380"}, + {url = "https://files.pythonhosted.org/packages/bf/47/d52be83b0ce72e83a6691177f27b110b7efefaae4f228f45e404c521e51d/torch-1.13.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:22128502fd8f5b25ac1cd849ecb64a418382ae81dd4ce2b5cebaa09ab15b0d9b"}, + {url = "https://files.pythonhosted.org/packages/ce/52/14eaa44345e444be2e2da749a7fdbdec71c45294cff33023de12ac3a9115/torch-1.13.1-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:6930791efa8757cb6974af73d4996b6b50c592882a324b8fb0589c6a9ba2ddaf"}, + {url = "https://files.pythonhosted.org/packages/f4/e5/003ac2b5b95cfea3a0a87150ede879daad446d581fdf967a8d7b2d4b6e05/torch-1.13.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:df8434b0695e9ceb8cc70650afc1310d8ba949e6db2a0525ddd9c3b2b181e5fe"}, +] +"torchaudio 0.13.1" = [ + {url = "https://files.pythonhosted.org/packages/01/66/0b5689b4a26255b23bf899d9ae016b04d9ea244e944b8010a71fe5f500ff/torchaudio-0.13.1-cp37-cp37m-win_amd64.whl", hash = "sha256:6bb94deeaf05fab9ff2f34906d3c36b46032420c3a08253d8c452999c235119c"}, + {url = "https://files.pythonhosted.org/packages/03/c9/f80e30d967edd9cf6537933097cce7ae3f9b99b4e7d0a57606816c98f648/torchaudio-0.13.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:4b798447283551def4d911dd6acb2c4bc08214e95f677f56c4f623fc99a90eff"}, + {url = "https://files.pythonhosted.org/packages/09/b0/783c39f3356e6b923a9b3b12a1b2de79e0b48bbe75992c3c2e341d25cb2a/torchaudio-0.13.1-cp39-cp39-win_amd64.whl", hash = "sha256:418fbf93ba77b9742b05b76561be4fe7e8ded27cfe414828624765986b30ce5a"}, + {url = "https://files.pythonhosted.org/packages/29/d9/e96945052a3be97ec5c4e4147c7896c88712b138a399e87c1cc3244780a3/torchaudio-0.13.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:62e9b5c260a27231d905588b72d2e2984ff9cdbb557af86eb178982fd265198d"}, + {url = "https://files.pythonhosted.org/packages/2b/f2/b0109eaf73a15c20901720e9bde572d1cac2f6b129db09290052deaeb2ef/torchaudio-0.13.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0fa7cc1a2b3056fc6ceee6d60dbcdef58955a7ca534667d0db9b4fc9efa087a1"}, + {url = "https://files.pythonhosted.org/packages/38/f9/e6c992cea7751fad817105f22b43c5b9fc45da068ec2371248e5ff9a38c0/torchaudio-0.13.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:32592088b48dfcd2ca247ad5d081a9e0c61de0caabb993d68bac779326456d8d"}, + {url = "https://files.pythonhosted.org/packages/48/0b/99c8f10fccccef0279acdfa2a6c27dd19d7eab3be1fd8fa59c09ad06b436/torchaudio-0.13.1-cp310-cp310-win_amd64.whl", hash = "sha256:5f2fc60206aa687eadc8cfb7c167784678936fbad13ccc583794fba3d6f77e1b"}, + {url = "https://files.pythonhosted.org/packages/56/9e/2aad69ef1ff532cb3bafdd2371fe393f05c46e0f7424f18bc46ede97ad7e/torchaudio-0.13.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:2e47562cdcdd47cb8ed86a3cf053b7067cc9e88340f4550ae73d790ddbc12f21"}, + {url = "https://files.pythonhosted.org/packages/5a/30/41cc29b7433fb18a51b413237eab090c7586137b8ea53979947e1c4c0c59/torchaudio-0.13.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:ec72a17d4d2178829e7780682999b535cf57fe160d0c20b0d6bdc1ad1a87c4dd"}, + {url = 
"https://files.pythonhosted.org/packages/5c/7e/d5fafef68428d046c67154239835209b2f328b1a87a5306faf155aa9141a/torchaudio-0.13.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:91fcfbf47000402d12bff2624e6220a0fd3b8ca8ee6ff51edf5945ec39ab0a7f"}, + {url = "https://files.pythonhosted.org/packages/7b/ee/a116e97bff82b00b4acdc3fc6c3bb1adbc704e67c6f1e6dc8e8d0ccd276e/torchaudio-0.13.1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:b093b3e7661c85168ec9dde2cf97345965ea0931d3d2a7e78bd409221e6d6998"}, + {url = "https://files.pythonhosted.org/packages/7d/d5/f1ecfd0176bb2d36f6eb2e734eb15a3b675196c3e0f0d3b600b59be54ce0/torchaudio-0.13.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:3023aeb5c191047bef1681a3741bffd4a2164b58a64cad24dd37da5e1ac2d1f1"}, + {url = "https://files.pythonhosted.org/packages/84/6f/5ed3a394935d8002eafb50599192534a155260e15333a688636624d18e54/torchaudio-0.13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9d2170540de32ae031aab3936129868e896ea041617b6d6692dde6aa2dfb0a23"}, + {url = "https://files.pythonhosted.org/packages/b5/73/9ec37c829a380e7dd21e160541d5a69c9b5e3020fcc7e29bee48414e1070/torchaudio-0.13.1-cp38-cp38-win_amd64.whl", hash = "sha256:5de44b6b96a8d7a05650ef7377b2386650ddce92551d7dc02e05e7002aee5fd2"}, + {url = "https://files.pythonhosted.org/packages/c1/37/c42a07263cad3a08939865bf1e6330bfe4e5583421feb1164a38a6d6cf66/torchaudio-0.13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5e0f3dc6699506521364266704e6bf89d0d0579fd435d12c5c2f5858d52de4fa"}, + {url = "https://files.pythonhosted.org/packages/c9/96/5610be43f1a3c2928f2af2147ffb7588fb80b1660e6ba0697e501f10158a/torchaudio-0.13.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:42ce5c66d304bc2cd68338916b8223e322e09a84dcbd9228814ef36bc477a37b"}, + {url = "https://files.pythonhosted.org/packages/e0/63/309be4b55d727d97fbd6617bd912ef9e7aefc5e9256c96367d707ed9a645/torchaudio-0.13.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:167f77ef385592a5af6f4e2ad1630a42ca1b70f905762fcd62e13dd4f163bdcf"}, + {url = "https://files.pythonhosted.org/packages/f2/b4/9eac8f2c692abd6ec21451c47c16a082eb5f2a1ae5cb3121db1f27b37c9a/torchaudio-0.13.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:3c48bcff00eae8180f87f58d1c9e7e9fd8c4cb7eb3ea8817935fb6048d152bc7"}, + {url = "https://files.pythonhosted.org/packages/f6/d4/5e898f626c73f5e9a2ae15be92186e2bb090fa7441c5c00f45549a8cb13d/torchaudio-0.13.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:901a6d65750fc3fb2c656ae274cc61599aa7d5472361fbc206e381a310d619d1"}, +] +"torchvision 0.14.1" = [ + {url = "https://files.pythonhosted.org/packages/04/7e/4eaf3fff579af12c5d02514fd613826b939c24f5a9d2c8962c7e23560382/torchvision-0.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eeb05dd9dd3af5428fee525400759daf8da8e4caec45ddd6908cfb36571f6433"}, + {url = "https://files.pythonhosted.org/packages/3a/e6/b631892eca70acccd4b86b1dcee4fd23347293e6a231de72af3eb464b1a0/torchvision-0.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:6099b3191dc2516099a32ae38a5fb349b42e863872a13545ab1a524b6567be60"}, + {url = "https://files.pythonhosted.org/packages/49/56/5bc1fcdf3a0974d662aa6a23b72307de3e5b0bb008e70a99e091728c7050/torchvision-0.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:68ed03359dcd3da9cd21b8ab94da21158df8a6a0c5bad0bf4a42f0e448d28cb3"}, + {url = "https://files.pythonhosted.org/packages/49/b0/c004291db040fa5a29877630934564ad1f9107b6f85356dea89aeae8975c/torchvision-0.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:b337e1245ca4353623dd563c03cd8f020c2496a7c5d12bba4d2e381999c766e0"}, + {url = 
"https://files.pythonhosted.org/packages/56/92/8d86f5a6320f66080b82543a377ad39bc0a6973544bd78d92c07dcc4658b/torchvision-0.14.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:a2d4237d3c9705d7729eb4534e4eb06f1d6be7ff1df391204dfb51586d9b0ecb"}, + {url = "https://files.pythonhosted.org/packages/5d/98/dd18891b11265ccc8a0c4450ce47aa3aafffd08b33037f085224a3ceea99/torchvision-0.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c5e744f56e5f5b452deb5fc0f3f2ba4d2f00612d14d8da0dbefea8f09ac7690b"}, + {url = "https://files.pythonhosted.org/packages/69/1e/63daba0cf15c71d39dee43d98045d4b483b7d7b07ba2a35d7a3e71a7b151/torchvision-0.14.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:8a9eb773a2fa8f516e404ac09c059fb14e6882c48fdbb9c946327d2ce5dba6cd"}, + {url = "https://files.pythonhosted.org/packages/77/ec/ccdfcafb958f6007cc357ce12fd945551a71503b88cd3f78e49fd958f949/torchvision-0.14.1-cp37-cp37m-win_amd64.whl", hash = "sha256:92a324712a87957443cc34223274298ae9496853f115c252f8fc02b931f2340e"}, + {url = "https://files.pythonhosted.org/packages/7c/22/ed84a5fe71a79c2942a726dfce28b1b258f92bf7b70ccdd72ff9d4c1b074/torchvision-0.14.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:a541e49fc3c4e90e49e6988428ab047415ed52ea97d0c0bfd147d8bacb8f4df8"}, + {url = "https://files.pythonhosted.org/packages/7c/d8/353b695fc47b9e2210313874fedd76b7ea5e699f2b2dd674919a95c4a0f1/torchvision-0.14.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:0ed02aefd09bf1114d35f1aa7dce55aa61c2c7e57f9aa02dce362860be654e85"}, + {url = "https://files.pythonhosted.org/packages/8a/88/e83d51deb96de0847884fddb82ac0958fdc06f814c846878489aa5857a91/torchvision-0.14.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:89fb0419780ec9a9eb9f7856a0149f6ac9f956b28f44b0c0080c6b5b48044db7"}, + {url = "https://files.pythonhosted.org/packages/8e/b6/8e910a505014ecdbfceb66610a55722b649663c604f8ff0fd25f74dc84f2/torchvision-0.14.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:83045507ef8d3c015d4df6be79491375b2f901352cfca6e72b4723e9c4f9a55d"}, + {url = "https://files.pythonhosted.org/packages/a5/1a/ce8d3be43d1ff77dd39121e74691a14dcebb96224e5b080d83d48c888e78/torchvision-0.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:30fcf0e9fe57d4ac4ce6426659a57dce199637ccb6c70be1128670f177692624"}, + {url = "https://files.pythonhosted.org/packages/b8/e0/edf3d41324c27f246abe1a4942227c6abe44fb2e62d35807178acb1355ba/torchvision-0.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:13986f0c15377ff23039e1401012ccb6ecf71024ce53def27139e4eac5a57592"}, + {url = "https://files.pythonhosted.org/packages/b9/46/065c71441c0bfb34914569f3728c4ecaa0364e65ed2f3adba98d84be568e/torchvision-0.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8d0766ea92affa7af248e327dd85f7c9cfdf51a57530b43212d4e1858548e9d7"}, + {url = "https://files.pythonhosted.org/packages/c1/04/7babdd446fb62b7bc8f9cc1d8a50e759ac502c077ff882d3c3aa4897566e/torchvision-0.14.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:eaed58cf454323ed9222d4e0dd5fb897064f454b400696e03a5200e65d3a1e76"}, + {url = "https://files.pythonhosted.org/packages/c1/b0/f238bcbbd99a044ce6b8f6e592ee47e0cdf3dfa155c127460a45a2586cad/torchvision-0.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:758b20d079e810b4740bd60d1eb16e49da830e3360f9be379eb177ee221fa5d4"}, + {url = "https://files.pythonhosted.org/packages/cd/d4/66de471d47a293dd52c254deff0081d7c9b8d1304b911bb3cffaa1fa956b/torchvision-0.14.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:6d7b35653113664ea3fdcb71f515cfbf29d2fe393000fd8aaff27a1284de6908"}, + {url = 
"https://files.pythonhosted.org/packages/f3/f8/c4601983a1ccb75588e77de2fd3932170cc24bdf5839565af839e9fa9729/torchvision-0.14.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:fb7a793fd33ce1abec24b42778419a3fb1e3159d7dfcb274a3ca8fb8cbc408dc"}, +] +"typing-extensions 4.5.0" = [ + {url = "https://files.pythonhosted.org/packages/31/25/5abcd82372d3d4a3932e1fa8c3dbf9efac10cc7c0d16e78467460571b404/typing_extensions-4.5.0-py3-none-any.whl", hash = "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"}, + {url = "https://files.pythonhosted.org/packages/d3/20/06270dac7316220643c32ae61694e451c98f8caf4c8eab3aa80a2bedf0df/typing_extensions-4.5.0.tar.gz", hash = "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb"}, +] +"urllib3 1.26.15" = [ + {url = "https://files.pythonhosted.org/packages/21/79/6372d8c0d0641b4072889f3ff84f279b738cd8595b64c8e0496d4e848122/urllib3-1.26.15.tar.gz", hash = "sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305"}, + {url = "https://files.pythonhosted.org/packages/7b/f5/890a0baca17a61c1f92f72b81d3c31523c99bec609e60c292ea55b387ae8/urllib3-1.26.15-py2.py3-none-any.whl", hash = "sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42"}, +] +"wheel 0.40.0" = [ + {url = "https://files.pythonhosted.org/packages/61/86/cc8d1ff2ca31a312a25a708c891cf9facbad4eae493b3872638db6785eb5/wheel-0.40.0-py3-none-any.whl", hash = "sha256:d236b20e7cb522daf2390fa84c55eea81c5c30190f90f29ae2ca1ad8355bf247"}, + {url = "https://files.pythonhosted.org/packages/fc/ef/0335f7217dd1e8096a9e8383e1d472aa14717878ffe07c4772e68b6e8735/wheel-0.40.0.tar.gz", hash = "sha256:cd1196f3faee2b31968d626e1731c94f99cbdb67cf5a46e4f5656cbee7738873"}, +] +"zipp 3.15.0" = [ + {url = "https://files.pythonhosted.org/packages/00/27/f0ac6b846684cecce1ee93d32450c45ab607f65c2e0255f0092032d91f07/zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"}, + {url = "https://files.pythonhosted.org/packages/5b/fa/c9e82bbe1af6266adf08afb563905eb87cab83fde00a0a08963510621047/zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"}, +] diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..4ab4d84 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,79 @@ +[tool.pdm] +[tool.pdm.dev-dependencies] +dev = [ + "pytest>=7.2.2", +] + +[project] +name = "batdetect2" +version = "0.2.0" +description = "Deep learning model for detecting and classifying bat echolocation calls in high frequency audio recordings." 
+authors = [
+    { "name" = "Oisin Mac Aodha", "email" = "oisin.macaodha@ed.ac.uk" },
+    { "name" = "Santiago Martinez Balvanera", "email" = "santiago.balvanera.20@ucl.ac.uk" }
+]
+dependencies = [
+    "librosa",
+    "matplotlib",
+    "numpy",
+    "pandas",
+    "scikit-learn",
+    "scipy",
+    "torch<2",
+    "torchaudio",
+    "torchvision",
+    "click",
+]
+requires-python = ">=3.8,<3.11"
+readme = "README.md"
+license = { text = "CC-by-nc-4" }
+classifiers = [
+    "Development Status :: 4 - Beta",
+    "Intended Audience :: Science/Research",
+    "Natural Language :: English",
+    "Operating System :: OS Independent",
+    "Programming Language :: Python :: 3.8",
+    "Programming Language :: Python :: 3.9",
+    "Topic :: Scientific/Engineering :: Artificial Intelligence",
+    "Topic :: Software Development :: Libraries :: Python Modules",
+    "Topic :: Multimedia :: Sound/Audio :: Analysis",
+]
+keywords = [
+    "bat",
+    "echolocation",
+    "deep learning",
+    "audio",
+    "machine learning",
+    "classification",
+    "detection",
+]
+
+[build-system]
+requires = ["pdm-pep517>=1.0.0"]
+build-backend = "pdm.pep517.api"
+
+[project.scripts]
+batdetect2 = "bat_detect.cli:cli"
+
+[tool.black]
+line-length = 80
+
+[[tool.mypy.overrides]]
+module = [
+    "librosa",
+    "pandas",
+]
+ignore_missing_imports = true
+
+[tool.pylsp-mypy]
+enabled = false
+live_mode = true
+strict = true
+
+[tool.pyright]
+include = [
+    "bat_detect",
+    "tests",
+]
+venvPath = "."
+venv = ".venv"
diff --git a/requirements.txt b/requirements.txt
index 5bb8e16..cac4479 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,3 +7,4 @@ scipy==1.9.3
 torch==1.13.0
 torchaudio==0.13.0
 torchvision==0.14.0
+click
diff --git a/run_batdetect.py b/run_batdetect.py
index 9655d45..adab803 100644
--- a/run_batdetect.py
+++ b/run_batdetect.py
@@ -1,67 +1,5 @@
-import os
-import argparse
-import bat_detect.utils.detector_utils as du
-
-
-def main(args):
-
-    print('Loading model: ' + args['model_path'])
-    model, params = du.load_model(args['model_path'])
-
-    print('\nInput directory: ' + args['audio_dir'])
-    files = du.get_audio_files(args['audio_dir'])
-    print('Number of audio files: {}'.format(len(files)))
-    print('\nSaving results to: ' + args['ann_dir'])
-
-    # process files
-    error_files = []
-    for ii, audio_file in enumerate(files):
-        print('\n' + str(ii).ljust(6) + os.path.basename(audio_file))
-        try:
-            results = du.process_file(audio_file, model, params, args)
-            if args['save_preds_if_empty'] or (len(results['pred_dict']['annotation']) > 0):
-                results_path = audio_file.replace(args['audio_dir'], args['ann_dir'])
-                du.save_results_to_file(results, results_path)
-        except:
-            error_files.append(audio_file)
-            print("Error processing file!")
-
-    print('\nResults saved to: ' + args['ann_dir'])
-
-    if len(error_files) > 0:
-        print('\nUnable to process the follow files:')
-        for err in error_files:
-            print('  ' + err)
-
+"""Run bat_detect.cli.detect() from the command line."""
+from bat_detect.cli import detect
 
 if __name__ == "__main__":
-
-    info_str = '\nBatDetect2 - Detection and Classification\n' + \
-               '  Assumes audio files are mono, not stereo.\n' + \
-               '  Spaces in the input paths will throw an error. Wrap in quotes "".\n' + \
-               '  Input files should be short in duration e.g. < 30 seconds.\n'
< 30 seconds.\n' - - print(info_str) - parser = argparse.ArgumentParser() - parser.add_argument('audio_dir', type=str, help='Input directory for audio') - parser.add_argument('ann_dir', type=str, help='Output directory for where the predictions will be stored') - parser.add_argument('detection_threshold', type=float, help='Cut-off probability for detector e.g. 0.1') - parser.add_argument('--cnn_features', action='store_true', default=False, dest='cnn_features', - help='Extracts CNN call features') - parser.add_argument('--spec_features', action='store_true', default=False, dest='spec_features', - help='Extracts low level call features') - parser.add_argument('--time_expansion_factor', type=int, default=1, dest='time_expansion_factor', - help='The time expansion factor used for all files (default is 1)') - parser.add_argument('--quiet', action='store_true', default=False, dest='quiet', - help='Minimize output printing') - parser.add_argument('--save_preds_if_empty', action='store_true', default=False, dest='save_preds_if_empty', - help='Save empty annotation file if no detections made.') - parser.add_argument('--model_path', type=str, default='models/Net2DFast_UK_same.pth.tar', - help='Path to trained BatDetect2 model') - args = vars(parser.parse_args()) - - args['spec_slices'] = False # used for visualization - args['chunk_size'] = 2 # if files greater than this amount (seconds) they will be broken down into small chunks - args['ann_dir'] = os.path.join(args['ann_dir'], '') - - main(args) + detect() diff --git a/scripts/gen_dataset_summary_image.py b/scripts/gen_dataset_summary_image.py index b789584..7e424ad 100644 --- a/scripts/gen_dataset_summary_image.py +++ b/scripts/gen_dataset_summary_image.py @@ -3,62 +3,95 @@ Loads a set of annotations corresponding to a dataset and saves an image which is the mean spectrogram for each class. 
""" +import argparse +import os +import sys + import matplotlib.pyplot as plt import numpy as np -import os -import argparse -import sys import viz_helpers as vz -sys.path.append(os.path.join('..')) -import bat_detect.train.train_utils as tu +sys.path.append(os.path.join("..")) import bat_detect.detector.parameters as parameters -import bat_detect.utils.audio_utils as au import bat_detect.train.train_split as ts - +import bat_detect.train.train_utils as tu +import bat_detect.utils.audio_utils as au if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('audio_path', type=str, help='Input directory for audio') - parser.add_argument('op_dir', type=str, - help='Path to where single annotation json file is stored') - parser.add_argument('--ann_file', type=str, - help='Path to where single annotation json file is stored') - parser.add_argument('--uk_split', type=str, default='', - help='Set as: diff or same') - parser.add_argument('--file_type', type=str, default='png', - help='Type of image to save png or pdf') + parser.add_argument( + "audio_path", type=str, help="Input directory for audio" + ) + parser.add_argument( + "op_dir", + type=str, + help="Path to where single annotation json file is stored", + ) + parser.add_argument( + "--ann_file", + type=str, + help="Path to where single annotation json file is stored", + ) + parser.add_argument( + "--uk_split", type=str, default="", help="Set as: diff or same" + ) + parser.add_argument( + "--file_type", + type=str, + default="png", + help="Type of image to save png or pdf", + ) args = vars(parser.parse_args()) - if not os.path.isdir(args['op_dir']): - os.makedirs(args['op_dir']) + if not os.path.isdir(args["op_dir"]): + os.makedirs(args["op_dir"]) params = parameters.get_params(False) - params['smooth_spec'] = False - params['spec_width'] = 48 - params['norm_type'] = 'log' # log, pcen - params['aud_pad'] = 0.005 - classes_to_ignore = params['classes_to_ignore'] + params['generic_class'] - + params["smooth_spec"] = False + params["spec_width"] = 48 + params["norm_type"] = "log" # log, pcen + params["aud_pad"] = 0.005 + classes_to_ignore = params["classes_to_ignore"] + params["generic_class"] # load train annotations - if args['uk_split'] == '': - print('\nLoading:', args['ann_file'], '\n') - dataset_name = os.path.basename(args['ann_file']).replace('.json', '') + if args["uk_split"] == "": + print("\nLoading:", args["ann_file"], "\n") + dataset_name = os.path.basename(args["ann_file"]).replace(".json", "") datasets = [] - datasets.append(tu.get_blank_dataset_dict(dataset_name, False, args['ann_file'], args['audio_path'])) + datasets.append( + tu.get_blank_dataset_dict( + dataset_name, False, args["ann_file"], args["audio_path"] + ) + ) else: # load uk data - special case - print('\nLoading:', args['uk_split'], '\n') - dataset_name = 'uk_' + args['uk_split'] # should be uk_diff, or uk_same - datasets, _ = ts.get_train_test_data(args['ann_file'], args['audio_path'], args['uk_split'], load_extra=False) + print("\nLoading:", args["uk_split"], "\n") + dataset_name = "uk_" + args["uk_split"] # should be uk_diff, or uk_same + datasets, _ = ts.get_train_test_data( + args["ann_file"], + args["audio_path"], + args["uk_split"], + load_extra=False, + ) - anns, class_names, _ = tu.load_set_of_anns(datasets, classes_to_ignore, params['events_of_interest']) + anns, class_names, _ = tu.load_set_of_anns( + datasets, classes_to_ignore, params["events_of_interest"] + ) class_names_order = range(len(class_names)) - x_train, y_train = 
vz.load_data(anns, params, class_names, smooth_spec=params['smooth_spec'], norm_type=params['norm_type']) + x_train, y_train = vz.load_data( + anns, + params, + class_names, + smooth_spec=params["smooth_spec"], + norm_type=params["norm_type"], + ) - op_file_name = os.path.join(args['op_dir'], dataset_name + '.' + args['file_type']) - vz.save_summary_image(x_train, y_train, class_names, params, op_file_name, class_names_order) - print('\nImage saved to:', op_file_name) + op_file_name = os.path.join( + args["op_dir"], dataset_name + "." + args["file_type"] + ) + vz.save_summary_image( + x_train, y_train, class_names, params, op_file_name, class_names_order + ) + print("\nImage saved to:", op_file_name) diff --git a/scripts/gen_spec_image.py b/scripts/gen_spec_image.py index 11f76de..c8f8639 100644 --- a/scripts/gen_spec_image.py +++ b/scripts/gen_spec_image.py @@ -7,24 +7,27 @@ Will save images with: 3) spectrogram with predicted boxes """ -import numpy as np -import sys -import os import argparse -import matplotlib.pyplot as plt import json +import os +import sys -sys.path.append(os.path.join('..')) +import matplotlib.pyplot as plt +import numpy as np + +sys.path.append(os.path.join("..")) import bat_detect.evaluate.evaluate_models as evlm +import bat_detect.utils.audio_utils as au import bat_detect.utils.detector_utils as du import bat_detect.utils.plot_utils as viz -import bat_detect.utils.audio_utils as au def filter_anns(anns, start_time, stop_time): anns_op = [] for aa in anns: - if (aa['start_time'] >= start_time) and (aa['start_time'] < stop_time-0.02): + if (aa["start_time"] >= start_time) and ( + aa["start_time"] < stop_time - 0.02 + ): anns_op.append(aa) return anns_op @@ -32,85 +35,175 @@ def filter_anns(anns, start_time, stop_time): if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('audio_file', type=str, help='Path to audio file') - parser.add_argument('model_path', type=str, help='Path to BatDetect model') - parser.add_argument('--ann_file', type=str, default='', help='Path to annotation file') - parser.add_argument('--op_dir', type=str, default='plots/', - help='Output directory for plots') - parser.add_argument('--file_type', type=str, default='png', - help='Type of image to save png or pdf') - parser.add_argument('--title_text', type=str, default='', - help='Text to add as title of plots') - parser.add_argument('--detection_threshold', type=float, default=0.2, - help='Threshold for output detections') - parser.add_argument('--start_time', type=float, default=0.0, - help='Start time for cropped file') - parser.add_argument('--stop_time', type=float, default=0.5, - help='End time for cropped file') - parser.add_argument('--time_expansion_factor', type=int, default=1, - help='Time expansion factor') - + parser.add_argument("audio_file", type=str, help="Path to audio file") + parser.add_argument("model_path", type=str, help="Path to BatDetect model") + parser.add_argument( + "--ann_file", type=str, default="", help="Path to annotation file" + ) + parser.add_argument( + "--op_dir", + type=str, + default="plots/", + help="Output directory for plots", + ) + parser.add_argument( + "--file_type", + type=str, + default="png", + help="Type of image to save png or pdf", + ) + parser.add_argument( + "--title_text", + type=str, + default="", + help="Text to add as title of plots", + ) + parser.add_argument( + "--detection_threshold", + type=float, + default=0.2, + help="Threshold for output detections", + ) + parser.add_argument( + "--start_time", + 
type=float, + default=0.0, + help="Start time for cropped file", + ) + parser.add_argument( + "--stop_time", + type=float, + default=0.5, + help="End time for cropped file", + ) + parser.add_argument( + "--time_expansion_factor", + type=int, + default=1, + help="Time expansion factor", + ) + args_cmd = vars(parser.parse_args()) - - # load the model - bd_args = du.get_default_bd_args() - model, params_bd = du.load_model(args_cmd['model_path']) - bd_args['detection_threshold'] = args_cmd['detection_threshold'] - bd_args['time_expansion_factor'] = args_cmd['time_expansion_factor'] - + + # load the model + bd_args = du.get_default_run_config() + model, params_bd = du.load_model(args_cmd["model_path"]) + bd_args["detection_threshold"] = args_cmd["detection_threshold"] + bd_args["time_expansion_factor"] = args_cmd["time_expansion_factor"] + # load the annotation if it exists gt_present = False - if args_cmd['ann_file'] != '': - if os.path.isfile(args_cmd['ann_file']): - with open(args_cmd['ann_file']) as da: + if args_cmd["ann_file"] != "": + if os.path.isfile(args_cmd["ann_file"]): + with open(args_cmd["ann_file"]) as da: gt_anns = json.load(da) - gt_anns = filter_anns(gt_anns['annotation'], args_cmd['start_time'], args_cmd['stop_time']) + gt_anns = filter_anns( + gt_anns["annotation"], + args_cmd["start_time"], + args_cmd["stop_time"], + ) gt_present = True else: - print('Annotation file not found: ', args_cmd['ann_file']) + print("Annotation file not found: ", args_cmd["ann_file"]) # load the audio file - if not os.path.isfile(args_cmd['audio_file']): - print('Audio file not found: ', args_cmd['audio_file']) + if not os.path.isfile(args_cmd["audio_file"]): + print("Audio file not found: ", args_cmd["audio_file"]) sys.exit() - + # load audio and crop - print('\nProcessing: ' + os.path.basename(args_cmd['audio_file'])) - print('\nOutput directory: ' + args_cmd['op_dir']) - sampling_rate, audio = au.load_audio_file(args_cmd['audio_file'], args_cmd['time_exp'], - params_bd['target_samp_rate'], params_bd['scale_raw_audio']) - st_samp = int(sampling_rate*args_cmd['start_time']) - en_samp = int(sampling_rate*args_cmd['stop_time']) + print("\nProcessing: " + os.path.basename(args_cmd["audio_file"])) + print("\nOutput directory: " + args_cmd["op_dir"]) + sampling_rate, audio = au.load_audio( + args_cmd["audio_file"], + args_cmd["time_expansion_factor"], + params_bd["target_samp_rate"], + params_bd["scale_raw_audio"], + ) + st_samp = int(sampling_rate * args_cmd["start_time"]) + en_samp = int(sampling_rate * args_cmd["stop_time"]) if en_samp > audio.shape[0]: - audio = np.hstack((audio, np.zeros((en_samp) - audio.shape[0], dtype=audio.dtype))) + audio = np.hstack( + (audio, np.zeros((en_samp) - audio.shape[0], dtype=audio.dtype)) + ) audio = audio[st_samp:en_samp] duration = audio.shape[0] / sampling_rate - print('File duration: {} seconds'.format(duration)) + print("File duration: {} seconds".format(duration)) # create spec for viz - spec, _ = au.generate_spectrogram(audio, sampling_rate, params_bd, True, False) + spec, _ = au.generate_spectrogram( + audio, sampling_rate, params_bd, True, False + ) + + run_config = { + **params_bd, + **bd_args, + } # run model and filter detections so only keep ones in relevant time range - results = du.process_file(args_cmd['audio_file'], model, params_bd, bd_args) - pred_anns = filter_anns(results['pred_dict']['annotation'], args_cmd['start_time'], args_cmd['stop_time']) - print(len(pred_anns), 'Detections') + results = du.process_file(args_cmd["audio_file"], model, run_config) 
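+    # Note: run_config is built above as {**params_bd, **bd_args}; later keys win in a dict merge, so the command-line overrides in bd_args take precedence over the model defaults loaded in params_bd.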
+    pred_anns = filter_anns( + results["pred_dict"]["annotation"], + args_cmd["start_time"], + args_cmd["stop_time"], + ) + print(len(pred_anns), "Detections") # save output - if not os.path.isdir(args_cmd['op_dir']): - os.makedirs(args_cmd['op_dir']) - + if not os.path.isdir(args_cmd["op_dir"]): + os.makedirs(args_cmd["op_dir"]) + # create output file names - op_path_clean = os.path.basename(args_cmd['audio_file'])[:-4] + '_clean.' + args_cmd['file_type'] - op_path_clean = os.path.join(args_cmd['op_dir'], op_path_clean) - op_path_pred = os.path.basename(args_cmd['audio_file'])[:-4] + '_pred.' + args_cmd['file_type'] - op_path_pred = os.path.join(args_cmd['op_dir'], op_path_pred) + op_path_clean = ( + os.path.basename(args_cmd["audio_file"])[:-4] + + "_clean." + + args_cmd["file_type"] + ) + op_path_clean = os.path.join(args_cmd["op_dir"], op_path_clean) + op_path_pred = ( + os.path.basename(args_cmd["audio_file"])[:-4] + + "_pred." + + args_cmd["file_type"] + ) + op_path_pred = os.path.join(args_cmd["op_dir"], op_path_pred) # create and save images - viz.save_ann_spec(op_path_clean, spec, params_bd['min_freq'], params_bd['max_freq'], duration, args_cmd['start_time'], '', None) - viz.save_ann_spec(op_path_pred, spec, params_bd['min_freq'], params_bd['max_freq'], duration, args_cmd['start_time'], '', pred_anns) + viz.save_ann_spec( + op_path_clean, + spec, + params_bd["min_freq"], + params_bd["max_freq"], + duration, + args_cmd["start_time"], + "", + None, + ) + viz.save_ann_spec( + op_path_pred, + spec, + params_bd["min_freq"], + params_bd["max_freq"], + duration, + args_cmd["start_time"], + "", + pred_anns, + ) if gt_present: - op_path_gt = os.path.basename(args_cmd['audio_file'])[:-4] + '_gt.' + args_cmd['file_type'] - op_path_gt = os.path.join(args_cmd['op_dir'], op_path_gt) - viz.save_ann_spec(op_path_gt, spec, params_bd['min_freq'], params_bd['max_freq'], duration, args_cmd['start_time'], '', gt_anns) + op_path_gt = ( + os.path.basename(args_cmd["audio_file"])[:-4] + + "_gt." + + args_cmd["file_type"] + ) + op_path_gt = os.path.join(args_cmd["op_dir"], op_path_gt) + viz.save_ann_spec( + op_path_gt, + spec, + params_bd["min_freq"], + params_bd["max_freq"], + duration, + args_cmd["start_time"], + "", + gt_anns, + ) diff --git a/scripts/gen_spec_video.py b/scripts/gen_spec_video.py index cccfcf8..2588ede 100644 --- a/scripts/gen_spec_video.py +++ b/scripts/gen_spec_video.py @@ -8,163 +8,263 @@ Notes: Best to use system one - see ffmpeg_path. 
""" -from scipy.io import wavfile +import argparse import os import shutil +import sys + import matplotlib.pyplot as plt import numpy as np -import argparse +from scipy.io import wavfile -import sys -sys.path.append(os.path.join('..')) +sys.path.append(os.path.join("..")) import bat_detect.detector.parameters as parameters import bat_detect.utils.audio_utils as au -import bat_detect.utils.plot_utils as viz import bat_detect.utils.detector_utils as du - +import bat_detect.utils.plot_utils as viz if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('audio_file', type=str, help='Path to input audio file') - parser.add_argument('model_path', type=str, help='Path to trained BatDetect model') - parser.add_argument('--op_dir', type=str, default='generated_vids/', help='Path to output directory') - parser.add_argument('--no_detector', action='store_true', help='Do not run detector') - parser.add_argument('--plot_class_names_off', action='store_true', help='Do not plot class names') - parser.add_argument('--disable_axis', action='store_true', help='Do not plot axis') - parser.add_argument('--detection_threshold', type=float, default=0.2, help='Cut-off probability for detector') - parser.add_argument('--time_expansion_factor', type=int, default=1, dest='time_expansion_factor', - help='The time expansion factor used for all files (default is 1)') + parser.add_argument("audio_file", type=str, help="Path to input audio file") + parser.add_argument( + "model_path", type=str, help="Path to trained BatDetect model" + ) + parser.add_argument( + "--op_dir", + type=str, + default="generated_vids/", + help="Path to output directory", + ) + parser.add_argument( + "--no_detector", action="store_true", help="Do not run detector" + ) + parser.add_argument( + "--plot_class_names_off", + action="store_true", + help="Do not plot class names", + ) + parser.add_argument( + "--disable_axis", action="store_true", help="Do not plot axis" + ) + parser.add_argument( + "--detection_threshold", + type=float, + default=0.2, + help="Cut-off probability for detector", + ) + parser.add_argument( + "--time_expansion_factor", + type=int, + default=1, + dest="time_expansion_factor", + help="The time expansion factor used for all files (default is 1)", + ) args_cmd = vars(parser.parse_args()) # file of interest - audio_file = args_cmd['audio_file'] - op_dir = args_cmd['op_dir'] - op_str = '_output' - ffmpeg_path = '/usr/bin/' + audio_file = args_cmd["audio_file"] + op_dir = args_cmd["op_dir"] + op_str = "_output" + ffmpeg_path = "/usr/bin/" if not os.path.isfile(audio_file): - print('Audio file not found: ', audio_file) + print("Audio file not found: ", audio_file) sys.exit() - if not os.path.isfile(args_cmd['model_path']): - print('Model not found: ', model_path) + if not os.path.isfile(args_cmd["model_path"]): + print("Model not found: ", model_path) sys.exit() - start_time = 0.0 duration = 0.5 reveal_boxes = True # makes the boxes appear one at a time fps = 24 dpi = 100 - op_dir_tmp = os.path.join(op_dir, 'op_tmp_vids', '') + op_dir_tmp = os.path.join(op_dir, "op_tmp_vids", "") if not os.path.isdir(op_dir_tmp): os.makedirs(op_dir_tmp) if not os.path.isdir(op_dir): os.makedirs(op_dir) params = parameters.get_params(False) - args = du.get_default_bd_args() - args['time_expansion_factor'] = args_cmd['time_expansion_factor'] - args['detection_threshold'] = args_cmd['detection_threshold'] - + args = du.get_default_run_config() + args["time_expansion_factor"] = args_cmd["time_expansion_factor"] + 
args["detection_threshold"] = args_cmd["detection_threshold"] # load audio file - print('\nProcessing: ' + os.path.basename(audio_file)) - print('\nOutput directory: ' + op_dir) - sampling_rate, audio = au.load_audio_file(audio_file, args['time_expansion_factor'], params['target_samp_rate']) - audio = audio[int(sampling_rate*start_time):int(sampling_rate*start_time + sampling_rate*duration)] + print("\nProcessing: " + os.path.basename(audio_file)) + print("\nOutput directory: " + op_dir) + sampling_rate, audio = au.load_audio( + audio_file, args["time_expansion_factor"], params["target_samp_rate"] + ) + audio = audio[ + int(sampling_rate * start_time) : int( + sampling_rate * start_time + sampling_rate * duration + ) + ] audio_orig = audio.copy() - audio = au.pad_audio(audio, sampling_rate, params['fft_win_length'], - params['fft_overlap'], params['resize_factor'], - params['spec_divide_factor']) + audio = au.pad_audio( + audio, + sampling_rate, + params["fft_win_length"], + params["fft_overlap"], + params["resize_factor"], + params["spec_divide_factor"], + ) # generate spectrogram spec, _ = au.generate_spectrogram(audio, sampling_rate, params, True) - max_val = spec.max()*1.1 + max_val = spec.max() * 1.1 + if not args_cmd["no_detector"]: + print(" Loading model and running detector on entire file ...") + model, det_params = du.load_model(args_cmd["model_path"]) + det_params["detection_threshold"] = args["detection_threshold"] - if not args_cmd['no_detector']: - print(' Loading model and running detector on entire file ...') - model, det_params = du.load_model(args_cmd['model_path']) - det_params['detection_threshold'] = args['detection_threshold'] - results = du.process_file(audio_file, model, det_params, args) + run_config = { + **det_params, + **args, + } + results = du.process_file(audio_file, model, run_config) - print(' Processing detections and plotting ...') + print(" Processing detections and plotting ...") detections = [] - for bb in results['pred_dict']['annotation']: - if (bb['start_time'] >= start_time) and (bb['end_time'] < start_time+duration): + for bb in results["pred_dict"]["annotation"]: + if (bb["start_time"] >= start_time) and ( + bb["end_time"] < start_time + duration + ): detections.append(bb) # plot boxes - fig = plt.figure(1, figsize=(spec.shape[1]/dpi, spec.shape[0]/dpi), dpi=dpi) - duration = au.x_coords_to_time(spec.shape[1], sampling_rate, params['fft_win_length'], params['fft_overlap']) - viz.create_box_image(spec, fig, detections, start_time, start_time+duration, duration, params, max_val, - plot_class_names=not args_cmd['plot_class_names_off']) - op_im_file_boxes = os.path.join(op_dir, os.path.basename(audio_file)[:-4] + op_str + '_boxes.png') + fig = plt.figure( + 1, figsize=(spec.shape[1] / dpi, spec.shape[0] / dpi), dpi=dpi + ) + duration = au.x_coords_to_time( + spec.shape[1], + sampling_rate, + params["fft_win_length"], + params["fft_overlap"], + ) + viz.create_box_image( + spec, + fig, + detections, + start_time, + start_time + duration, + duration, + params, + max_val, + plot_class_names=not args_cmd["plot_class_names_off"], + ) + op_im_file_boxes = os.path.join( + op_dir, os.path.basename(audio_file)[:-4] + op_str + "_boxes.png" + ) fig.savefig(op_im_file_boxes, dpi=dpi) plt.close(1) spec_with_boxes = plt.imread(op_im_file_boxes) - - print(' Saving audio file ...') - if args['time_expansion_factor']==1: - sampling_rate_op = int(sampling_rate/10.0) + print(" Saving audio file ...") + if args["time_expansion_factor"] == 1: + sampling_rate_op = 
int(sampling_rate / 10.0) else: sampling_rate_op = sampling_rate - op_audio_file = os.path.join(op_dir, os.path.basename(audio_file)[:-4] + op_str + '.wav') + op_audio_file = os.path.join( + op_dir, os.path.basename(audio_file)[:-4] + op_str + ".wav" + ) wavfile.write(op_audio_file, sampling_rate_op, audio_orig) - - print(' Saving image ...') - op_im_file = os.path.join(op_dir, os.path.basename(audio_file)[:-4] + op_str + '.png') - plt.imsave(op_im_file, spec, vmin=0, vmax=max_val, cmap='plasma') + print(" Saving image ...") + op_im_file = os.path.join( + op_dir, os.path.basename(audio_file)[:-4] + op_str + ".png" + ) + plt.imsave(op_im_file, spec, vmin=0, vmax=max_val, cmap="plasma") spec_blank = plt.imread(op_im_file) # create figure freq_scale = 1000 # turn Hz to kHz - min_freq = params['min_freq']//freq_scale - max_freq = params['max_freq']//freq_scale + min_freq = params["min_freq"] // freq_scale + max_freq = params["max_freq"] // freq_scale y_extent = [0, duration, min_freq, max_freq] - print(' Saving video frames ...') + print(" Saving video frames ...") # save images that will be combined into video # will either plot with or without boxes - for ii, col in enumerate(np.linspace(0, spec.shape[1]-1, int(fps*duration*10))): - if not args_cmd['no_detector']: + for ii, col in enumerate( + np.linspace(0, spec.shape[1] - 1, int(fps * duration * 10)) + ): + if not args_cmd["no_detector"]: spec_op = spec_with_boxes.copy() if ii > 0: spec_op[:, int(col), :] = 1.0 if reveal_boxes: - spec_op[:, int(col)+1:, :] = spec_blank[:, int(col)+1:, :] + spec_op[:, int(col) + 1 :, :] = spec_blank[ + :, int(col) + 1 :, : + ] elif ii == 0 and reveal_boxes: spec_op = spec_blank - if not args_cmd['disable_axis']: - plt.close('all') - fig = plt.figure(ii, figsize=(1.2*(spec_op.shape[1]/dpi), 1.5*(spec_op.shape[0]/dpi)), dpi=dpi) - plt.xlabel('Time - seconds') - plt.ylabel('Frequency - kHz') - plt.imshow(spec_op, vmin=0, vmax=1.0, cmap='plasma', extent=y_extent, aspect='auto') + if not args_cmd["disable_axis"]: + plt.close("all") + fig = plt.figure( + ii, + figsize=( + 1.2 * (spec_op.shape[1] / dpi), + 1.5 * (spec_op.shape[0] / dpi), + ), + dpi=dpi, + ) + plt.xlabel("Time - seconds") + plt.ylabel("Frequency - kHz") + plt.imshow( + spec_op, + vmin=0, + vmax=1.0, + cmap="plasma", + extent=y_extent, + aspect="auto", + ) plt.tight_layout() - fig.savefig(op_dir_tmp + str(ii).zfill(4) + '.png', dpi=dpi) + fig.savefig(op_dir_tmp + str(ii).zfill(4) + ".png", dpi=dpi) else: - plt.imsave(op_dir_tmp + str(ii).zfill(4) + '.png', spec_op, vmin=0, vmax=1.0, cmap='plasma') + plt.imsave( + op_dir_tmp + str(ii).zfill(4) + ".png", + spec_op, + vmin=0, + vmax=1.0, + cmap="plasma", + ) else: spec_op = spec.copy() if ii > 0: spec_op[:, int(col)] = max_val - plt.imsave(op_dir_tmp + str(ii).zfill(4) + '.png', spec_op, vmin=0, vmax=max_val, cmap='plasma') + plt.imsave( + op_dir_tmp + str(ii).zfill(4) + ".png", + spec_op, + vmin=0, + vmax=max_val, + cmap="plasma", + ) - - print(' Creating video ...') - op_vid_file = os.path.join(op_dir, os.path.basename(audio_file)[:-4] + op_str + '.avi') - ffmpeg_cmd = 'ffmpeg -hide_banner -loglevel panic -y -r {} -f image2 -s {}x{} -i {}%04d.png -i {} -vcodec libx264 ' \ - '-crf 25 -pix_fmt yuv420p -acodec copy {}'.format(fps, spec.shape[1], spec.shape[0], op_dir_tmp, op_audio_file, op_vid_file) + print(" Creating video ...") + op_vid_file = os.path.join( + op_dir, os.path.basename(audio_file)[:-4] + op_str + ".avi" + ) + ffmpeg_cmd = ( + "ffmpeg -hide_banner -loglevel panic -y -r {} -f image2 -s 
{}x{} -i {}%04d.png -i {} -vcodec libx264 " + "-crf 25 -pix_fmt yuv420p -acodec copy {}".format( + fps, + spec.shape[1], + spec.shape[0], + op_dir_tmp, + op_audio_file, + op_vid_file, + ) + ) ffmpeg_cmd = ffmpeg_path + ffmpeg_cmd os.system(ffmpeg_cmd) - print(' Deleting temporary files ...') + print(" Deleting temporary files ...") if os.path.isdir(op_dir_tmp): - shutil.rmtree(op_dir_tmp) + shutil.rmtree(op_dir_tmp) diff --git a/scripts/viz_helpers.py b/scripts/viz_helpers.py index 2f55836..5044b8e 100644 --- a/scripts/viz_helpers.py +++ b/scripts/viz_helpers.py @@ -1,41 +1,70 @@ -import numpy as np -import matplotlib.pyplot as plt -from scipy import ndimage import os import sys -sys.path.append(os.path.join('..')) + +import matplotlib.pyplot as plt +import numpy as np +from scipy import ndimage + +sys.path.append(os.path.join("..")) import bat_detect.utils.audio_utils as au -def generate_spectrogram_data(audio, sampling_rate, params, norm_type='log', smooth_spec=False): - max_freq = round(params['max_freq']*params['fft_win_length']) - min_freq = round(params['min_freq']*params['fft_win_length']) +def generate_spectrogram_data( + audio, sampling_rate, params, norm_type="log", smooth_spec=False +): + max_freq = round(params["max_freq"] * params["fft_win_length"]) + min_freq = round(params["min_freq"] * params["fft_win_length"]) # create spectrogram - numpy - spec = au.gen_mag_spectrogram(audio, sampling_rate, params['fft_win_length'], params['fft_overlap']) - #spec = au.gen_mag_spectrogram_pt(audio, sampling_rate, params['fft_win_length'], params['fft_overlap']).numpy() + spec = au.gen_mag_spectrogram( + audio, sampling_rate, params["fft_win_length"], params["fft_overlap"] + ) + # spec = au.gen_mag_spectrogram_pt(audio, sampling_rate, params['fft_win_length'], params['fft_overlap']).numpy() if spec.shape[0] < max_freq: freq_pad = max_freq - spec.shape[0] - spec = np.vstack((np.zeros((freq_pad, spec.shape[1]), dtype=np.float32), spec)) - spec = spec[-max_freq:spec.shape[0]-min_freq, :] + spec = np.vstack( + (np.zeros((freq_pad, spec.shape[1]), dtype=np.float32), spec) + ) + spec = spec[-max_freq : spec.shape[0] - min_freq, :] - if norm_type == 'log': - log_scaling = 2.0 * (1.0 / sampling_rate) * (1.0/(np.abs(np.hanning(int(params['fft_win_length']*sampling_rate)))**2).sum()) + if norm_type == "log": + log_scaling = ( + 2.0 + * (1.0 / sampling_rate) + * ( + 1.0 + / ( + np.abs( + np.hanning( + int(params["fft_win_length"] * sampling_rate) + ) + ) + ** 2 + ).sum() + ) + ) ##log_scaling = 0.01 - spec = np.log(1.0 + log_scaling*spec).astype(np.float32) - elif norm_type == 'pcen': + spec = np.log(1.0 + log_scaling * spec).astype(np.float32) + elif norm_type == "pcen": spec = au.pcen(spec, sampling_rate) else: pass if smooth_spec: - spec = ndimage.gaussian_filter(spec, 1) + spec = ndimage.gaussian_filter(spec, 1) return spec -def load_data(anns, params, class_names, smooth_spec=False, norm_type='log', extract_bg=False): +def load_data( + anns, + params, + class_names, + smooth_spec=False, + norm_type="log", + extract_bg=False, +): specs = [] labels = [] coords = [] @@ -43,67 +72,106 @@ def load_data(anns, params, class_names, smooth_spec=False, norm_type='log', ext sampling_rates = [] file_names = [] for cur_file in anns: - sampling_rate, audio_orig = au.load_audio_file(cur_file['file_path'], cur_file['time_exp'], - params['target_samp_rate'], params['scale_raw_audio']) + sampling_rate, audio_orig = au.load_audio( + cur_file["file_path"], + cur_file["time_exp"], + params["target_samp_rate"], + 
params["scale_raw_audio"], + ) - for ann in cur_file['annotation']: - if ann['class'] not in params['classes_to_ignore'] and ann['class'] in class_names: + for ann in cur_file["annotation"]: + if ( + ann["class"] not in params["classes_to_ignore"] + and ann["class"] in class_names + ): # clip out of bounds - if ann['low_freq'] < params['min_freq']: - ann['low_freq'] = params['min_freq'] - if ann['high_freq'] > params['max_freq']: - ann['high_freq'] = params['max_freq'] + if ann["low_freq"] < params["min_freq"]: + ann["low_freq"] = params["min_freq"] + if ann["high_freq"] > params["max_freq"]: + ann["high_freq"] = params["max_freq"] # load cropped audio - start_samp_diff = int(sampling_rate*ann['start_time']) - int(sampling_rate*params['aud_pad']) + start_samp_diff = int(sampling_rate * ann["start_time"]) - int( + sampling_rate * params["aud_pad"] + ) start_samp = np.maximum(0, start_samp_diff) - end_samp = np.minimum(audio_orig.shape[0], int(sampling_rate*ann['end_time'])*2 + int(sampling_rate*params['aud_pad'])) + end_samp = np.minimum( + audio_orig.shape[0], + int(sampling_rate * ann["end_time"]) * 2 + + int(sampling_rate * params["aud_pad"]), + ) audio = audio_orig[start_samp:end_samp] if start_samp_diff < 0: # need to pad at start if the call is at the very begining - audio = np.hstack((np.zeros(-start_samp_diff, dtype=np.float32), audio)) + audio = np.hstack( + (np.zeros(-start_samp_diff, dtype=np.float32), audio) + ) - nfft = int(params['fft_win_length']*sampling_rate) - noverlap = int(params['fft_overlap']*nfft) - max_samps = params['spec_width']*(nfft - noverlap) + noverlap + nfft = int(params["fft_win_length"] * sampling_rate) + noverlap = int(params["fft_overlap"] * nfft) + max_samps = params["spec_width"] * (nfft - noverlap) + noverlap if max_samps > audio.shape[0]: - audio = np.hstack((audio, np.zeros(max_samps - audio.shape[0]))) + audio = np.hstack( + (audio, np.zeros(max_samps - audio.shape[0])) + ) audio = audio[:max_samps].astype(np.float32) - audio = au.pad_audio(audio, sampling_rate, params['fft_win_length'], - params['fft_overlap'], params['resize_factor'], - params['spec_divide_factor']) + audio = au.pad_audio( + audio, + sampling_rate, + params["fft_win_length"], + params["fft_overlap"], + params["resize_factor"], + params["spec_divide_factor"], + ) # generate spectrogram - spec = generate_spectrogram_data(audio, sampling_rate, params, norm_type, smooth_spec)[:, :params['spec_width']] + spec = generate_spectrogram_data( + audio, sampling_rate, params, norm_type, smooth_spec + )[:, : params["spec_width"]] specs.append(spec[np.newaxis, ...]) - labels.append(ann['class']) + labels.append(ann["class"]) audios.append(audio) sampling_rates.append(sampling_rate) - file_names.append(cur_file['file_path']) + file_names.append(cur_file["file_path"]) # position in crop - x1 = int(au.time_to_x_coords(np.array(params['aud_pad']), sampling_rate, params['fft_win_length'], params['fft_overlap'])) - y1 = (ann['low_freq'] - params['min_freq']) * params['fft_win_length'] + x1 = int( + au.time_to_x_coords( + np.array(params["aud_pad"]), + sampling_rate, + params["fft_win_length"], + params["fft_overlap"], + ) + ) + y1 = (ann["low_freq"] - params["min_freq"]) * params[ + "fft_win_length" + ] coords.append((y1, x1)) - _, file_ids = np.unique(file_names, return_inverse=True) labels = np.array([class_names.index(ll) for ll in labels]) - #return np.vstack(specs), labels, coords, audios, sampling_rates, file_ids, file_names + # return np.vstack(specs), labels, coords, audios, sampling_rates, 
file_ids, file_names return np.vstack(specs), labels -def save_summary_image(specs, labels, species_names, params, op_file_name='plots/all_species.png', order=None): +def save_summary_image( + specs, + labels, + species_names, + params, + op_file_name="plots/all_species.png", + order=None, +): # takes the mean for each class and plots it on a grid mean_specs = [] max_band = [] for ii in range(len(species_names)): - inds = np.where(labels==ii)[0] + inds = np.where(labels == ii)[0] mu = specs[inds, :].mean(0) max_band.append(np.argmax(mu.sum(1))) mean_specs.append(mu) @@ -113,11 +181,21 @@ def save_summary_image(specs, labels, species_names, params, op_file_name='plots order = np.arange(len(species_names)) max_cols = 6 - nrows = int(np.ceil(len(species_names)/max_cols)) + nrows = int(np.ceil(len(species_names) / max_cols)) ncols = np.minimum(len(species_names), max_cols) - fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(ncols*3.3, nrows*6), gridspec_kw = {'wspace':0, 'hspace':0.2}) - spec_min_max = (0, mean_specs[0].shape[1], params['min_freq']/1000, params['max_freq']/1000) + fig, ax = plt.subplots( + nrows=nrows, + ncols=ncols, + figsize=(ncols * 3.3, nrows * 6), + gridspec_kw={"wspace": 0, "hspace": 0.2}, + ) + spec_min_max = ( + 0, + mean_specs[0].shape[1], + params["min_freq"] / 1000, + params["max_freq"] / 1000, + ) ii = 0 for row in ax: @@ -126,17 +204,22 @@ def save_summary_image(specs, labels, species_names, params, op_file_name='plots for col in row: if ii >= len(species_names): - col.axis('off') + col.axis("off") else: - inds = np.where(labels==order[ii])[0] - col.imshow(mean_specs[order[ii]], extent=spec_min_max, cmap='plasma', aspect='equal') - col.grid(color='w', alpha=0.3, linewidth=0.3) + inds = np.where(labels == order[ii])[0] + col.imshow( + mean_specs[order[ii]], + extent=spec_min_max, + cmap="plasma", + aspect="equal", + ) + col.grid(color="w", alpha=0.3, linewidth=0.3) col.set_xticks([]) - col.title.set_text(str(ii+1) + ' ' + species_names[order[ii]]) - col.tick_params(axis='both', which='major', labelsize=7) + col.title.set_text(str(ii + 1) + " " + species_names[order[ii]]) + col.tick_params(axis="both", which="major", labelsize=7) ii += 1 - #plt.tight_layout() - #plt.show() + # plt.tight_layout() + # plt.show() plt.savefig(op_file_name) - plt.close('all') + plt.close("all") diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_api.py b/tests/test_api.py new file mode 100644 index 0000000..8158a1f --- /dev/null +++ b/tests/test_api.py @@ -0,0 +1,253 @@ +"""Test bat detect module API.""" + +import os +from glob import glob + +import numpy as np +import torch +from torch import nn + +from bat_detect import api + +PKG_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +TEST_DATA_DIR = os.path.join(PKG_DIR, "example_data", "audio") +TEST_DATA = glob(os.path.join(TEST_DATA_DIR, "*.wav")) + + +def test_load_model_with_default_params(): + """Test loading model with default parameters.""" + model, params = api.load_model() + + assert model is not None + assert isinstance(model, nn.Module) + + assert params is not None + assert isinstance(params, dict) + + assert "model_name" in params + assert "num_filters" in params + assert "emb_dim" in params + assert "ip_height" in params + + assert params["model_name"] == "Net2DFast" + assert params["num_filters"] == 128 + assert params["emb_dim"] == 0 + assert params["ip_height"] == 128 + assert params["resize_factor"] == 0.5 + assert 
len(params["class_names"]) == 17 + + +def test_list_audio_files(): + """Test listing audio files.""" + audio_files = api.list_audio_files(TEST_DATA_DIR) + + assert len(audio_files) == 3 + assert all(path.endswith((".wav", ".WAV")) for path in audio_files) + + +def test_load_audio(): + """Test loading audio.""" + audio = api.load_audio(TEST_DATA[0]) + + assert audio is not None + assert isinstance(audio, np.ndarray) + assert audio.shape == (128000,) + + +def test_generate_spectrogram(): + """Test generating spectrogram.""" + audio = api.load_audio(TEST_DATA[0]) + spectrogram = api.generate_spectrogram(audio) + + assert spectrogram is not None + assert isinstance(spectrogram, torch.Tensor) + assert spectrogram.shape == (1, 1, 128, 512) + + +def test_get_default_config(): + """Test getting default configuration.""" + config = api.get_config() + + assert config is not None + assert isinstance(config, dict) + + assert config["target_samp_rate"] == 256000 + assert config["fft_win_length"] == 0.002 + assert config["fft_overlap"] == 0.75 + assert config["resize_factor"] == 0.5 + assert config["spec_divide_factor"] == 32 + assert config["spec_height"] == 256 + assert config["spec_scale"] == "pcen" + assert config["denoise_spec_avg"] is True + assert config["max_scale_spec"] is False + assert config["scale_raw_audio"] is False + assert len(config["class_names"]) == 0 + assert config["detection_threshold"] == 0.01 + assert config["time_expansion"] == 1 + assert config["top_n"] == 3 + assert config["return_raw_preds"] is False + assert config["max_duration"] is None + assert config["nms_kernel_size"] == 9 + assert config["max_freq"] == 120000 + assert config["min_freq"] == 10000 + assert config["nms_top_k_per_sec"] == 200 + assert config["quiet"] is True + assert config["chunk_size"] == 3 + assert config["cnn_features"] is False + assert config["spec_features"] is False + assert config["spec_slices"] is False + + +def test_api_exposes_default_model(): + """Test that API exposes default model.""" + assert hasattr(api, "model") + assert isinstance(api.model, nn.Module) + assert type(api.model).__name__ == "Net2DFast" + + # Check that model has expected attributes + assert api.model.num_classes == 17 + assert api.model.num_filts == 128 + assert api.model.emb_dim == 0 + assert api.model.ip_height_rs == 128 + assert api.model.resize_factor == 0.5 + + +def test_api_exposes_default_config(): + """Test that API exposes default configuration.""" + assert hasattr(api, "config") + assert isinstance(api.config, dict) + + assert api.config["target_samp_rate"] == 256000 + assert api.config["fft_win_length"] == 0.002 + assert api.config["fft_overlap"] == 0.75 + assert api.config["resize_factor"] == 0.5 + assert api.config["spec_divide_factor"] == 32 + assert api.config["spec_height"] == 256 + assert api.config["spec_scale"] == "pcen" + assert api.config["denoise_spec_avg"] is True + assert api.config["max_scale_spec"] is False + assert api.config["scale_raw_audio"] is False + assert len(api.config["class_names"]) == 17 + assert api.config["detection_threshold"] == 0.01 + assert api.config["time_expansion"] == 1 + assert api.config["top_n"] == 3 + assert api.config["return_raw_preds"] is False + assert api.config["max_duration"] is None + assert api.config["nms_kernel_size"] == 9 + assert api.config["max_freq"] == 120000 + assert api.config["min_freq"] == 10000 + assert api.config["nms_top_k_per_sec"] == 200 + assert api.config["quiet"] is True + assert api.config["chunk_size"] == 3 + assert 
api.config["cnn_features"] is False + assert api.config["spec_features"] is False + assert api.config["spec_slices"] is False + + +def test_process_file_with_default_model(): + """Test processing file with model.""" + predictions = api.process_file(TEST_DATA[0]) + + assert predictions is not None + assert isinstance(predictions, dict) + + assert "pred_dict" in predictions + + # By default will not return other features + assert "spec_feats" not in predictions + assert "spec_feat_names" not in predictions + assert "cnn_feats" not in predictions + assert "cnn_feat_names" not in predictions + assert "spec_slices" not in predictions + + # Check that predictions are returned + assert isinstance(predictions["pred_dict"], dict) + pred_dict = predictions["pred_dict"] + assert pred_dict["id"] == os.path.basename(TEST_DATA[0]) + assert pred_dict["annotated"] is False + assert pred_dict["issues"] is False + assert pred_dict["notes"] == "Automatically generated." + assert pred_dict["time_exp"] == 1 + assert pred_dict["duration"] == 0.5 + assert pred_dict["class_name"] is not None + assert len(pred_dict["annotation"]) > 0 + + +def test_process_spectrogram_with_default_model(): + """Test processing spectrogram with model.""" + audio = api.load_audio(TEST_DATA[0]) + spectrogram = api.generate_spectrogram(audio) + predictions, features = api.process_spectrogram(spectrogram) + + assert predictions is not None + assert isinstance(predictions, list) + assert len(predictions) > 0 + sample_pred = predictions[0] + assert isinstance(sample_pred, dict) + assert "class" in sample_pred + assert "class_prob" in sample_pred + assert "det_prob" in sample_pred + assert "start_time" in sample_pred + assert "end_time" in sample_pred + assert "low_freq" in sample_pred + assert "high_freq" in sample_pred + + assert features is not None + assert isinstance(features, list) + assert len(features) == 1 + + +def test_process_audio_with_default_model(): + """Test processing audio with model.""" + audio = api.load_audio(TEST_DATA[0]) + predictions, features, spec = api.process_audio(audio) + + assert predictions is not None + assert isinstance(predictions, list) + assert len(predictions) > 0 + sample_pred = predictions[0] + assert isinstance(sample_pred, dict) + assert "class" in sample_pred + assert "class_prob" in sample_pred + assert "det_prob" in sample_pred + assert "start_time" in sample_pred + assert "end_time" in sample_pred + assert "low_freq" in sample_pred + assert "high_freq" in sample_pred + + assert features is not None + assert isinstance(features, list) + assert len(features) == 1 + + assert spec is not None + assert isinstance(spec, torch.Tensor) + assert spec.shape == (1, 1, 128, 512) + + +def test_postprocess_model_outputs(): + """Test postprocessing model outputs.""" + # Load model outputs + audio = api.load_audio(TEST_DATA[1]) + spec = api.generate_spectrogram(audio) + model_outputs = api.model(spec) + + # Postprocess outputs + predictions, features = api.postprocess(model_outputs) + + assert predictions is not None + assert isinstance(predictions, list) + assert len(predictions) > 0 + sample_pred = predictions[0] + assert isinstance(sample_pred, dict) + assert "class" in sample_pred + assert "class_prob" in sample_pred + assert "det_prob" in sample_pred + assert "start_time" in sample_pred + assert "end_time" in sample_pred + assert "low_freq" in sample_pred + assert "high_freq" in sample_pred + + assert features is not None + assert isinstance(features, np.ndarray) + assert features.shape[0] == 
len(predictions) + assert features.shape[1] == 32 diff --git a/tests/test_cli.py b/tests/test_cli.py new file mode 100644 index 0000000..4570cf5 --- /dev/null +++ b/tests/test_cli.py @@ -0,0 +1,41 @@ +"""Test the command line interface.""" +from click.testing import CliRunner + +from bat_detect.cli import cli + + +def test_cli_base_command(): + runner = CliRunner() + result = runner.invoke(cli, ["--help"]) + assert result.exit_code == 0 + assert "BatDetect2 - Bat Call Detection and Classification" in result.output + + +def test_cli_detect_command_help(): + runner = CliRunner() + result = runner.invoke(cli, ["detect", "--help"]) + assert result.exit_code == 0 + assert "Detect bat calls in files in AUDIO_DIR" in result.output + + +def test_cli_detect_command_on_test_audio(tmp_path): + results_dir = tmp_path / "results" + + # Remove results dir if it exists + if results_dir.exists(): + results_dir.rmdir() + + runner = CliRunner() + result = runner.invoke( + cli, + [ + "detect", + "example_data/audio", + str(results_dir), + "0.3", + ], + ) + assert result.exit_code == 0 + assert results_dir.exists() + assert len(list(results_dir.glob("*.csv"))) == 3 + assert len(list(results_dir.glob("*.json"))) == 3
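
The API tests above double as usage documentation for the new `bat_detect.api` module. A minimal end-to-end sketch, assembled only from calls exercised in `tests/test_api.py` (the wav path below is illustrative):

```python
from bat_detect import api

# Load the bundled default model and its parameters.
model, params = api.load_model()

# Run the full pipeline on a single recording.
audio = api.load_audio("example_data/audio/recording.wav")  # illustrative path
detections, features, spec = api.process_audio(audio)

# Alternatively, start from a precomputed spectrogram.
spectrogram = api.generate_spectrogram(audio)
detections, features = api.process_spectrogram(spectrogram)

# Each detection is a dict with class, probability and bounding-box fields.
for det in detections:
    print(det["class"], det["class_prob"], det["start_time"], det["end_time"])
```

Here `process_audio` returns the detections, the extracted features and the spectrogram tensor, while `process_spectrogram` returns only the first two, matching the assertions in `test_process_audio_with_default_model` and `test_process_spectrogram_with_default_model`.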