# Sound Event Detection
This repository implements a class for building audio classifiers that follow the SED (Sound Event Detection) architecture.
The model computes the mel spectrogram internally and uses a CNN backbone.
This architecture was used in the 2021 birdcall competition, where it helped me reach 3rd place.

Both PyTorch and TensorFlow implementations are provided; however, the TensorFlow version has only been tested for inference, not training. I would advise using the PyTorch version.
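At a high level, the model turns the raw waveform into a log-mel spectrogram, runs a CNN backbone over it, and pools the per-frame features with a PANNs-style attention head to produce both framewise and clipwise probabilities. Below is a minimal conceptual sketch of that attention pooling; it illustrates the idea from the PANNs paper cited at the bottom, not this package's actual internals (class name and layout are illustrative only):

```{python}
import torch
from torch import nn

class AttentionPooling(nn.Module):
    """Illustrative PANNs-style attention head: framewise class
    probabilities are pooled over time with learned attention weights."""
    def __init__(self, in_features, num_classes):
        super().__init__()
        self.att = nn.Conv1d(in_features, num_classes, kernel_size=1)
        self.cla = nn.Conv1d(in_features, num_classes, kernel_size=1)

    def forward(self, x):                         # x: (batch, features, num_steps)
        att = torch.softmax(self.att(x), dim=-1)  # attention weights over time
        framewise = torch.sigmoid(self.cla(x))    # per-frame class probabilities
        clipwise = (att * framewise).sum(dim=-1)  # attention-weighted clip score
        return clipwise, framewise
```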
# Installation
> pip install audio-sed
If you use TensorFlow:
> pip install tflibrosa
# Examples
## PyTorch
```{python}
import torch
import timm
from torch import nn
import numpy as np
from audio_sed.sed_config import ConfigSED
from audio_sed.pytorch.sed_models import AudioClassifierSequenceSED, AudioSED, shape_from_backbone
def load_model(model_name, num_classe, cfg_sed):
    backbone = timm.create_model(model_name, pretrained=False)
    # Strip the classification head so the backbone outputs feature maps.
    if "efficientnet" in model_name:
        backbone.global_pool = nn.Identity()
        in_feat = backbone.classifier.in_features
        backbone.classifier = nn.Identity()
    elif "convnext" in model_name:
        in_feat = backbone.head.fc.in_features
        backbone.head = nn.Identity()

    # Probe the backbone with 5 s of dummy audio to get the number of time steps.
    in_features = shape_from_backbone(inputs=torch.as_tensor(np.random.uniform(0, 1, (1, int(5 * cfg_sed.sample_rate)))).float(),
                                      model=backbone, use_logmel=True, config_sed=cfg_sed.__dict__)[2]  # (batch size, channels, num_steps, y_axis)
    print("Num timestamps features:", in_features)
    model = AudioSED(backbone, num_classes=[num_classe], in_features=in_feat, hidden_size=1024, activation='sigmoid', use_logmel=True,
                     spectrogram_augmentation=None, apply_attention="step", drop_rate=[0.5, 0.5], config_sed=cfg_sed.__dict__)
    # Wrap the single-chunk model so it can score a sequence of chunks.
    model2 = AudioClassifierSequenceSED(model)
    return model, model2
cfg_sed = ConfigSED(window='hann', center=True, pad_mode='reflect', windows_size=1024, hop_size=320,
                    sample_rate=32000, mel_bins=128, fmin=50, fmax=16000, ref=1.0, amin=1e-10, top_db=None)

model_5, model = load_model(model_name="tf_efficientnet_b2_ns", num_classe=575, cfg_sed=cfg_sed)
# 20 s of dummy audio reshaped into 4 chunks of 5 s: (batch, num_chunks, samples)
inputs = torch.as_tensor(np.random.uniform(0, 1, (20 * 32000)).reshape(1, 4, -1)).float()
with torch.no_grad():
    o = model(inputs)
print(o[0]['clipwise'])
```
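The sequence wrapper returns per-chunk outputs, and `o[0]['clipwise']` holds the clip-level probabilities. A hypothetical post-processing step for turning those probabilities into predictions (the 0.5 threshold and the exact tensor shape are assumptions; check them against your checkpoint):

```{python}
probs = o[0]['clipwise'].cpu().numpy()       # assumed shape: (..., num_classes)
detected = np.argwhere(probs > 0.5)          # indices of classes above the threshold
top5 = np.argsort(probs, axis=-1)[..., -5:]  # top-5 class indices
```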
## TensorFlow
```{python}
import numpy as np
import tensorflow as tf
from audio_sed.sed_config import ConfigSED
from audio_sed.tensorflow.sed_models import (AudioClassifierSequenceSED as AudioClassifierSequenceSEDTF,
                                             AudioSED as AudioSEDTF,
                                             shape_from_backbone as shape_from_backboneTF)
def load_modeltf(model_name, num_classe, cfg_sed):
    # Keras application backbone without its classification head.
    backbone = tf.keras.applications.efficientnet.EfficientNetB2(include_top=False)
    if "efficientnet" in model_name:
        in_feat = backbone.layers[-1].output.shape[-1]
    elif "convnext" in model_name:
        in_feat = backbone.layers[-1].output.shape[-1]

    # Probe the backbone with 5 s of dummy audio; the output layout is
    # (batch size, num_steps, y_axis, channels).
    in_features = shape_from_backboneTF(inputs=np.random.uniform(0, 1, (1, int(5 * cfg_sed.sample_rate))), model=backbone,
                                        use_logmel=True, config_sed=cfg_sed.__dict__)[1]
    print("Num timestamps features:", in_features)
    model = AudioSEDTF(backbone, num_classes=[num_classe], in_features=in_feat, hidden_size=1024, activation='sigmoid', use_logmel=True,
                       spectrogram_augmentation=None, apply_attention="step", drop_rate=[0.5, 0.5], config_sed=cfg_sed.__dict__)
    # Wrap the single-chunk model so it can score a sequence of chunks.
    model = AudioClassifierSequenceSEDTF(model)
    return model

cfg_sed = ConfigSED(window='hann', center=True, pad_mode='reflect', windows_size=1024, hop_size=320,
                    sample_rate=32000, mel_bins=128, fmin=50, fmax=16000, ref=1.0, amin=1e-10, top_db=None)

model_tf = load_modeltf(model_name="efficientnetb2", num_classe=575, cfg_sed=cfg_sed)
# 20 s of dummy audio as 4 chunks of 5 s: (batch, num_chunks, samples)
inputs = np.random.uniform(0, 1, (1, 4, 5 * 32000))
o_tf = model_tf.predict(inputs)
print(o_tf[0])
```
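Both examples reshape a long recording into fixed-length 5-second chunks before feeding the sequence wrapper. A hypothetical helper for preparing arbitrary-length audio the same way (`chunk_audio` is not part of the package, and zero-padding the tail is one possible choice for handling remainders):

```{python}
import numpy as np

def chunk_audio(waveform, sample_rate=32000, chunk_seconds=5):
    """Split a 1-D waveform into fixed-length chunks, zero-padding the tail."""
    chunk = sample_rate * chunk_seconds
    n_chunks = int(np.ceil(len(waveform) / chunk))
    padded = np.zeros(n_chunks * chunk, dtype=np.float32)
    padded[:len(waveform)] = waveform
    return padded.reshape(1, n_chunks, chunk)  # (batch, num_chunks, samples)
```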
# Example 2
You can find [here](https://github.com/Shiro-LK/Portfolio-project/tree/main/BirdsCall_Detection) an example of this model being used for GUI-based inference, with some checkpoints available.
# Citation
- [PANNs: Large-Scale Pretrained Audio Neural Networks for Audio Pattern Recognition](https://arxiv.org/abs/1912.10211)
- PANNs model: https://github.com/qiuqiangkong/audioset_tagging_cnn
- tflibrosa: https://github.com/Shiro-LK/tflibrosa