Merge pull request #24 from joelmathewthomas/feature/deepfilternet
Feature/deepfilternet
This commit is contained in:
@@ -1,10 +1,13 @@
|
||||
absl-py==2.1.0
|
||||
aiohappyeyeballs==2.4.4
|
||||
aiohttp==3.11.11
|
||||
aiosignal==1.3.2
|
||||
amqp==5.3.1
|
||||
antlr4-python3-runtime==4.9.3
|
||||
appdirs==1.4.4
|
||||
asteroid==0.7.0
|
||||
asteroid-filterbanks==0.4.0
|
||||
astunparse==1.6.3
|
||||
attrs==25.1.0
|
||||
audioread==3.0.1
|
||||
billiard==4.2.1
|
||||
@@ -21,36 +24,51 @@ cloudpickle==3.1.1
|
||||
contourpy==1.3.1
|
||||
cycler==0.12.1
|
||||
decorator==5.1.1
|
||||
DeepFilterLib==0.5.6
|
||||
DeepFilterNet==0.5.6
|
||||
demucs==4.0.1
|
||||
dora_search==0.1.12
|
||||
einops==0.8.0
|
||||
filelock==3.17.0
|
||||
flatbuffers==25.2.10
|
||||
fonttools==4.55.6
|
||||
frozenlist==1.5.0
|
||||
fsspec==2024.12.0
|
||||
future==1.0.0
|
||||
gast==0.6.0
|
||||
google-pasta==0.2.0
|
||||
grpcio==1.70.0
|
||||
h5py==3.13.0
|
||||
huggingface-hub==0.28.0
|
||||
idna==3.10
|
||||
iniconfig==2.0.0
|
||||
Jinja2==3.1.5
|
||||
joblib==1.4.2
|
||||
julius==0.2.7
|
||||
keras==3.8.0
|
||||
kiwisolver==1.4.8
|
||||
kombu==5.4.2
|
||||
lameenc==1.8.1
|
||||
lazy_loader==0.4
|
||||
libclang==18.1.1
|
||||
librosa==0.10.2.post1
|
||||
lightning-utilities==0.11.9
|
||||
llvmlite==0.44.0
|
||||
loguru==0.7.3
|
||||
Markdown==3.7
|
||||
markdown-it-py==3.0.0
|
||||
MarkupSafe==3.0.2
|
||||
matplotlib==3.10.0
|
||||
mdurl==0.1.2
|
||||
mir_eval==0.7
|
||||
ml-dtypes==0.4.1
|
||||
mpmath==1.3.0
|
||||
msgpack==1.1.0
|
||||
multidict==6.1.0
|
||||
namex==0.0.8
|
||||
networkx==3.4.2
|
||||
numba==0.61.0
|
||||
numpy==2.1.3
|
||||
numpy==1.26.4
|
||||
nvidia-cublas-cu12==12.4.5.8
|
||||
nvidia-cuda-cupti-cu12==12.4.127
|
||||
nvidia-cuda-nvrtc-cu12==12.4.127
|
||||
@@ -65,7 +83,9 @@ nvidia-nvjitlink-cu12==12.4.127
|
||||
nvidia-nvtx-cu12==12.4.127
|
||||
omegaconf==2.3.0
|
||||
openunmix==1.3.0
|
||||
packaging==24.2
|
||||
opt_einsum==3.4.0
|
||||
optree==0.14.0
|
||||
packaging==23.2
|
||||
pandas==2.2.3
|
||||
pb-bss-eval==0.0.2
|
||||
pesq==0.0.4
|
||||
@@ -75,7 +95,9 @@ pluggy==1.5.0
|
||||
pooch==1.8.2
|
||||
prompt_toolkit==3.0.50
|
||||
propcache==0.2.1
|
||||
protobuf==5.29.3
|
||||
pycparser==2.22
|
||||
Pygments==2.19.1
|
||||
pyparsing==3.2.1
|
||||
pystoi==0.4.1
|
||||
pytest==8.3.4
|
||||
@@ -87,6 +109,7 @@ PyYAML==6.0.2
|
||||
redis==5.2.1
|
||||
requests==2.32.3
|
||||
retrying==1.3.4
|
||||
rich==13.9.4
|
||||
scikit-learn==1.6.1
|
||||
scipy==1.15.1
|
||||
setuptools==75.8.0
|
||||
@@ -95,6 +118,12 @@ soundfile==0.13.1
|
||||
soxr==0.5.0.post1
|
||||
submitit==1.5.2
|
||||
sympy==1.13.1
|
||||
tensorboard==2.18.0
|
||||
tensorboard-data-server==0.7.2
|
||||
tensorflow==2.18.0
|
||||
tensorflow-hub==0.16.1
|
||||
termcolor==2.5.0
|
||||
tf_keras==2.18.0
|
||||
threadpoolctl==3.5.0
|
||||
torch==2.5.1
|
||||
torch-optimizer==0.1.0
|
||||
@@ -109,4 +138,7 @@ tzdata==2025.1
|
||||
urllib3==2.3.0
|
||||
vine==5.1.0
|
||||
wcwidth==0.2.13
|
||||
Werkzeug==3.1.3
|
||||
wheel==0.45.1
|
||||
wrapt==1.17.2
|
||||
yarl==1.18.3
|
||||
@@ -1,51 +0,0 @@
|
||||
absl-py==2.1.0
|
||||
astunparse==1.6.3
|
||||
certifi==2024.12.14
|
||||
charset-normalizer==3.4.1
|
||||
flatbuffers==25.1.24
|
||||
gast==0.6.0
|
||||
google-pasta==0.2.0
|
||||
grpcio==1.70.0
|
||||
h5py==3.12.1
|
||||
idna==3.10
|
||||
keras==3.8.0
|
||||
libclang==18.1.1
|
||||
Markdown==3.7
|
||||
markdown-it-py==3.0.0
|
||||
MarkupSafe==3.0.2
|
||||
mdurl==0.1.2
|
||||
ml-dtypes==0.4.1
|
||||
namex==0.0.8
|
||||
numpy==2.0.2
|
||||
nvidia-cublas-cu12==12.5.3.2
|
||||
nvidia-cuda-cupti-cu12==12.5.82
|
||||
nvidia-cuda-nvcc-cu12==12.5.82
|
||||
nvidia-cuda-nvrtc-cu12==12.5.82
|
||||
nvidia-cuda-runtime-cu12==12.5.82
|
||||
nvidia-cudnn-cu12==9.3.0.75
|
||||
nvidia-cufft-cu12==11.2.3.61
|
||||
nvidia-curand-cu12==10.3.6.82
|
||||
nvidia-cusolver-cu12==11.6.3.83
|
||||
nvidia-cusparse-cu12==12.5.1.3
|
||||
nvidia-nccl-cu12==2.21.5
|
||||
nvidia-nvjitlink-cu12==12.5.82
|
||||
opt_einsum==3.4.0
|
||||
optree==0.14.0
|
||||
packaging==24.2
|
||||
protobuf==5.29.3
|
||||
Pygments==2.19.1
|
||||
requests==2.32.3
|
||||
rich==13.9.4
|
||||
setuptools==75.8.0
|
||||
six==1.17.0
|
||||
tensorboard==2.18.0
|
||||
tensorboard-data-server==0.7.2
|
||||
tensorflow==2.18.0
|
||||
tensorflow-hub==0.16.1
|
||||
termcolor==2.5.0
|
||||
tf_keras==2.18.0
|
||||
typing_extensions==4.12.2
|
||||
urllib3==2.3.0
|
||||
Werkzeug==3.1.3
|
||||
wheel==0.45.1
|
||||
wrapt==1.17.2
|
||||
@@ -1,23 +0,0 @@
|
||||
#!/bin/bash

# Set up a combined Python environment: activate envs/env and extend
# PYTHONPATH so packages from both envs/env and envs/env_tensorflow resolve.
#
# NOTE: activation and the PYTHONPATH export only persist if this script is
# *sourced*. Running it as a child process would silently have no effect on
# the caller's shell, so refuse to run that way.
if [ "${BASH_SOURCE[0]}" = "${0}" ]; then
    echo "This script must be sourced, not executed: 'source ${0}'"
    exit 1
fi

# Define the paths for the environments
ENV_1_PATH="envs/env"
ENV_2_PATH="envs/env_tensorflow"

# Check if the environments exist
if [ ! -d "$ENV_1_PATH" ] || [ ! -d "$ENV_2_PATH" ]; then
    echo "One or both environments do not exist in the 'envs/' directory."
    # 'return' (not 'exit') so a sourced failure does not kill the caller's shell
    return 1
fi

# Activate the first environment (default)
echo "Activating environment '$ENV_1_PATH'..."
source "$ENV_1_PATH/bin/activate"

# Export PYTHONPATH to include both environments' site-packages
# (assumes both venvs were created with Python 3.12 — TODO confirm)
export PYTHONPATH="$PWD/$ENV_1_PATH/lib/python3.12/site-packages:$PWD/$ENV_2_PATH/lib/python3.12/site-packages"

echo "Environment set up successfully. PYTHONPATH set to include both environments."

# Optionally, print the current PYTHONPATH to verify
echo "PYTHONPATH=${PYTHONPATH}"
|
||||
@@ -1,46 +0,0 @@
|
||||
#!/bin/bash

# Snapshot the installed packages of every virtual environment under envs/
# into a matching requirements/<env>.txt file. Must run from the project root.

PROJECT_ROOT="freq-split-enhance"

# Guard: refuse to run anywhere but the project root directory
if [ "$(basename "$PWD")" != "$PROJECT_ROOT" ]; then
    echo "This script must be run in the root directory of the project: '$PROJECT_ROOT'."
    exit 1
fi

# Guard: the environments must already exist
if [ ! -d "envs" ]; then
    echo "Directory 'envs/' does not exist. Please make sure it exists and contains the required environments. Please run the scripts/setup_env.sh script."
    exit 1
fi

# Ensure the output directory for the pinned requirements exists
if [ ! -d "requirements" ]; then
    echo "Directory 'requirements/' does not exist. Creating it..."
    mkdir requirements
fi

# Activate one environment, write its 'pip freeze' output to the given
# file, then deactivate again.
snapshot_one() {
    local venv_path=$1
    local out_file=$2

    echo "Freezing dependencies for environment '$venv_path'..."
    source "$venv_path/bin/activate"
    pip freeze > "$out_file"
    deactivate
    echo "Dependencies for '$venv_path' saved to '$out_file'."
}

# Walk every environment directory under envs/ and snapshot it
for venv_path in envs/*; do
    [ -d "$venv_path" ] || continue
    snapshot_one "$venv_path" "requirements/$(basename "$venv_path").txt"
done

echo "Dependencies for all environments have been successfully frozen."
|
||||
|
||||
@@ -1,69 +0,0 @@
|
||||
#!/bin/bash

# Interactively install a pip module into one of the virtual environments
# under envs/. Must be run from the project root.

# Check if the script is running in the root directory of the project
PROJECT_ROOT="freq-split-enhance"
CURRENT_DIR=$(basename "$PWD")

if [ "$CURRENT_DIR" != "$PROJECT_ROOT" ]; then
    echo "This script must be run in the root directory of the project: '$PROJECT_ROOT'."
    exit 1
fi

# Check if the envs/ directory exists
if [ ! -d "envs" ]; then
    echo "Directory 'envs/' does not exist. Please make sure it exists and contains the required environments. Please run the scripts/setup_env.sh script."
    exit 1
fi

# List all environments in the envs/ directory
echo "Available environments:"
env_count=0
envs_list=()

for env_dir in envs/*; do
    if [ -d "$env_dir" ]; then
        env_name=$(basename "$env_dir")
        envs_list+=("$env_name")
        echo "$((env_count + 1)). $env_name"
        ((env_count++))
    fi
done

# Check if any environments exist
if [ "$env_count" -eq 0 ]; then
    echo "No environments found in 'envs/'. Please create them first."
    exit 1
fi

# Ask the user to select an environment
read -p "Select an environment (1-$env_count): " env_choice

# Validate the user's choice
if [[ ! "$env_choice" =~ ^[0-9]+$ ]] || [ "$env_choice" -lt 1 ] || [ "$env_choice" -gt "$env_count" ]; then
    echo "Invalid choice. Please select a number between 1 and $env_count."
    exit 1
fi

# Get the selected environment name
selected_env="${envs_list[$((env_choice - 1))]}"

# Ask the user for the module they want to install
read -p "Enter the module you want to install in the '$selected_env' environment: " module_name

# Reject an empty module name before invoking pip
if [ -z "$module_name" ]; then
    echo "No module name entered. Aborting."
    exit 1
fi

# Function to install a module in the selected environment
install_module() {
    local env_dir=$1
    local module=$2
    local status

    echo "Activating environment '$env_dir' and installing module '$module'..."
    source "$env_dir/bin/activate"   # Activate the environment
    pip install "$module"            # Install the module
    status=$?                        # Capture pip's exit code before deactivating
    deactivate                       # Deactivate the environment

    # Only report success if pip actually succeeded; previously the success
    # message was printed unconditionally, masking failed installs.
    if [ "$status" -ne 0 ]; then
        echo "Failed to install module '$module' in '$env_dir'."
        exit 1
    fi
    echo "Module '$module' installed successfully in '$env_dir'."
}

# Install the module in the selected environment
install_module "envs/$selected_env" "$module_name"

echo "Module installation complete."
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
#!/bin/bash

# Bootstrap the project's virtual environments (envs/env and
# envs/env_tensorflow) and install their pinned dependencies.

# Stop on the first failed command so a broken venv creation or a failed
# pip install does not go unnoticed (previously errors were ignored and
# the script kept going).
set -e

# Check if the script is running in the root directory of the project
PROJECT_ROOT="freq-split-enhance"
CURRENT_DIR=$(basename "$PWD")

if [ "$CURRENT_DIR" != "$PROJECT_ROOT" ]; then
    echo "This script must be run in the root directory of the project: '$PROJECT_ROOT'."
    exit 1
fi

echo "Setting up virtual environments"

# -p: do not fail if envs/ already exists, so the script is re-runnable
mkdir -p envs

# Create common env
echo "Creating common virtual environment env"
python -m venv envs/env

# Create env for tensorflow
echo "Creating virtual environment env_tensorflow"
python -m venv envs/env_tensorflow

source envs/env/bin/activate
echo "Installing dependencies in virtual environment env"
pip install -r requirements/env.txt
deactivate

source envs/env_tensorflow/bin/activate
echo "Installing dependencies in virtual environment env_tensorflow"
pip install -r requirements/env_tensorflow.txt
deactivate
||||
@@ -5,6 +5,10 @@ import numpy as np
|
||||
import csv
|
||||
import os
|
||||
|
||||
|
||||
# Force TensorFlow to use only CPU
|
||||
tf.config.set_visible_devices([], 'GPU')
|
||||
|
||||
model = hub.load('https://tfhub.dev/google/yamnet/1')
|
||||
|
||||
#Find the name of the class with the top score when mean-aggregated across frames.
|
||||
|
||||
@@ -0,0 +1,12 @@
|
||||
# __init__.py

import logging
from datetime import datetime

# Configure package-wide logging once: timestamped messages at INFO level.
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.INFO)

logging.info("freq-split-enhance/refinement package has been imported.")
|
||||
@@ -0,0 +1,35 @@
|
||||
import os
|
||||
import torch
|
||||
from df.enhance import enhance, init_df, load_audio, save_audio
|
||||
|
||||
def noisereduce(input_audio_path, output_audio_path, model_path=None):
    """
    Apply noise reduction using DeepFilterNet.

    Args:
        input_audio_path (str): Path to the input noisy audio file.
        output_audio_path (str): Path to save the enhanced audio file.
        model_path (str, optional): Path to a custom DeepFilterNet model.
            Defaults to None (uses the pre-trained model).

    Returns:
        str: Path to the enhanced audio file (same as ``output_audio_path``).

    Raises:
        FileNotFoundError: If ``input_audio_path`` does not exist.
    """
    if not os.path.exists(input_audio_path):
        raise FileNotFoundError(f"Input file {input_audio_path} not found")

    # Initialize DeepFilterNet model
    model, df_state, _ = init_df(model_path)

    # Load audio at the model's sample rate
    audio, _ = load_audio(input_audio_path, sr=df_state.sr())

    # Ensure the output directory exists. os.path.dirname returns "" for a
    # bare filename (output in the CWD), and os.makedirs("") raises — so
    # only create the directory when there is one.
    output_dir = os.path.dirname(output_audio_path)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)

    # Apply noise reduction
    enhanced_audio = enhance(model, df_state, audio)

    # Save the enhanced audio
    save_audio(output_audio_path, enhanced_audio, df_state.sr())

    return output_audio_path
|
||||
Binary file not shown.
@@ -0,0 +1,23 @@
|
||||
import os
|
||||
import pytest
|
||||
import soundfile as sf
|
||||
from src.refinement.deepfilternet_wrapper import noisereduce
|
||||
|
||||
def test_noisereduce():
    """Test noise reduction function to ensure output is valid.

    Runs DeepFilterNet noise reduction on a bundled noisy fixture and checks
    that a non-empty audio file with a valid sample rate is produced.
    """
    import tempfile

    input_audio_path = "tests/test_audio/noise.wav"

    # Ensure test input exists
    assert os.path.exists(input_audio_path), f"Test input file {input_audio_path} not found."

    # Write the output into a fresh temporary directory so runs are isolated
    # and portable (replaces the hardcoded /tmp/noisereduce/output.wav, which
    # leaked state between runs and broke on non-POSIX systems).
    with tempfile.TemporaryDirectory() as tmp_dir:
        output_audio_path = os.path.join(tmp_dir, "output.wav")

        # Run noise reduction
        noisereduce(input_audio_path, output_audio_path)

        # Check if the output file exists
        assert os.path.exists(output_audio_path), "Output file was not created."

        # Load the enhanced audio and check if it's valid
        enhanced_audio, sample_rate = sf.read(output_audio_path)
        assert len(enhanced_audio) > 0, "Enhanced audio is empty."
        assert sample_rate > 0, "Invalid sample rate in output file."
|
||||
Reference in New Issue
Block a user