# diffsptk
*diffsptk* is a differentiable version of [SPTK](https://github.com/sp-nitech/SPTK) based on the PyTorch framework.
[![Latest Manual](https://img.shields.io/badge/docs-latest-blue.svg)](https://sp-nitech.github.io/diffsptk/latest/)
[![Stable Manual](https://img.shields.io/badge/docs-stable-blue.svg)](https://sp-nitech.github.io/diffsptk/2.2.0/)
[![Downloads](https://static.pepy.tech/badge/diffsptk)](https://pepy.tech/project/diffsptk)
[![Python Version](https://img.shields.io/pypi/pyversions/diffsptk.svg)](https://pypi.python.org/pypi/diffsptk)
[![PyTorch Version](https://img.shields.io/badge/pytorch-2.0.0%20%7C%202.4.0-orange.svg)](https://pypi.python.org/pypi/diffsptk)
[![PyPI Version](https://img.shields.io/pypi/v/diffsptk.svg)](https://pypi.python.org/pypi/diffsptk)
[![Codecov](https://codecov.io/gh/sp-nitech/diffsptk/branch/master/graph/badge.svg)](https://app.codecov.io/gh/sp-nitech/diffsptk)
[![License](https://img.shields.io/github/license/sp-nitech/diffsptk.svg)](https://github.com/sp-nitech/diffsptk/blob/master/LICENSE)
[![GitHub Actions](https://github.com/sp-nitech/diffsptk/workflows/package/badge.svg)](https://github.com/sp-nitech/diffsptk/actions)
[![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff)
## Requirements
- Python 3.9+
- PyTorch 2.0.0+
## Documentation
- See [this page](https://sp-nitech.github.io/diffsptk/latest/) for a reference manual.
- Our [paper](https://www.isca-speech.org/archive/ssw_2023/yoshimura23_ssw.html) is available on the ISCA Archive.
## Installation
The latest stable release can be installed through PyPI by running
```sh
pip install diffsptk
```
The development release can be installed from the master branch:
```sh
pip install git+https://github.com/sp-nitech/diffsptk.git@master
```
## Examples
### Mel-cepstral analysis and synthesis
```python
import diffsptk
fl = 400 # Frame length.
fp = 80 # Frame period.
n_fft = 512 # FFT length.
M = 24 # Mel-cepstrum dimensions.
# Read waveform.
x, sr = diffsptk.read("assets/data.wav")
# Compute STFT amplitude of x.
stft = diffsptk.STFT(frame_length=fl, frame_period=fp, fft_length=n_fft)
X = stft(x)
# Estimate mel-cepstrum of x.
alpha = diffsptk.get_alpha(sr)
mcep = diffsptk.MelCepstralAnalysis(cep_order=M, fft_length=n_fft, alpha=alpha, n_iter=10)
mc = mcep(X)
# Reconstruct x.
mlsa = diffsptk.MLSA(filter_order=M, frame_period=fp, alpha=alpha, taylor_order=20)
x_hat = mlsa(mlsa(x, -mc), mc)
# Write reconstructed waveform.
diffsptk.write("reconst.wav", x_hat, sr)
# Compute error.
error = (x_hat - x).abs().sum()
print(error)
# Extract pitch of x.
pitch = diffsptk.Pitch(frame_period=fp, sample_rate=sr, f_min=80, f_max=180)
p = pitch(x)
# Generate excitation signal.
excite = diffsptk.ExcitationGeneration(frame_period=fp)
e = excite(p)
n = diffsptk.nrand(x.size(0) - 1)
# Synthesize waveform.
x_voiced = mlsa(e, mc)
x_unvoiced = mlsa(n, mc)
# Output analysis-synthesis result.
diffsptk.write("voiced.wav", x_voiced, sr)
diffsptk.write("unvoiced.wav", x_unvoiced, sr)
```
### LPC analysis and synthesis
```python
import diffsptk
fl = 400 # Frame length.
fp = 80 # Frame period.
M = 24 # LPC dimensions.
# Read waveform.
x, sr = diffsptk.read("assets/data.wav")
# Estimate LPC of x.
frame = diffsptk.Frame(frame_length=fl, frame_period=fp)
window = diffsptk.Window(in_length=fl)
lpc = diffsptk.LPC(frame_length=fl, lpc_order=M, eps=1e-6)
a = lpc(window(frame(x)))
# Convert to inverse filter coefficients.
norm0 = diffsptk.AllPoleToAllZeroDigitalFilterCoefficients(filter_order=M)
b = norm0(a)
# Reconstruct x.
zerodf = diffsptk.AllZeroDigitalFilter(filter_order=M, frame_period=fp)
poledf = diffsptk.AllPoleDigitalFilter(filter_order=M, frame_period=fp)
x_hat = poledf(zerodf(x, b), a)
# Write reconstructed waveform.
diffsptk.write("reconst.wav", x_hat, sr)
# Compute error.
error = (x_hat - x).abs().sum()
print(error)
```
### Mel-spectrogram, MFCC, and PLP extraction
```python
import diffsptk
fl = 400 # Frame length
fp = 80 # Frame period
n_fft = 512 # FFT length
n_channel = 80 # Number of channels
M = 12 # MFCC/PLP dimensions
# Read waveform.
x, sr = diffsptk.read("assets/data.wav")
# Compute STFT amplitude of x.
stft = diffsptk.STFT(frame_length=fl, frame_period=fp, fft_length=n_fft)
X = stft(x)
# Extract log mel-spectrogram.
fbank = diffsptk.MelFilterBankAnalysis(
n_channel=n_channel,
fft_length=n_fft,
sample_rate=sr,
)
Y = fbank(X)
print(Y.shape)
# Extract MFCC.
mfcc = diffsptk.MFCC(
mfcc_order=M,
n_channel=n_channel,
fft_length=n_fft,
sample_rate=sr,
)
Y = mfcc(X)
print(Y.shape)
# Extract PLP.
plp = diffsptk.PLP(
plp_order=M,
n_channel=n_channel,
fft_length=n_fft,
sample_rate=sr,
)
Y = plp(X)
print(Y.shape)
```
### Subband decomposition
```python
import diffsptk
K = 4 # Number of subbands.
M = 40 # Order of filter.
# Read waveform.
x, sr = diffsptk.read("assets/data.wav")
# Decompose x.
pqmf = diffsptk.PQMF(K, M)
decimate = diffsptk.Decimation(K)
y = decimate(pqmf(x))
# Reconstruct x.
interpolate = diffsptk.Interpolation(K)
ipqmf = diffsptk.IPQMF(K, M)
x_hat = ipqmf(interpolate(K * y)).reshape(-1)
# Write reconstructed waveform.
diffsptk.write("reconst.wav", x_hat, sr)
# Compute error.
error = (x_hat - x).abs().sum()
print(error)
```
### Constant-Q transform
```python
import diffsptk
import librosa # This is to get sample audio.
fp = 128 # Frame period.
K = 252 # Number of CQ-bins.
B = 36 # Number of bins per octave.
# Read waveform.
x, sr = diffsptk.read(librosa.ex("trumpet"))
# Transform x.
cqt = diffsptk.CQT(fp, sr, n_bin=K, n_bin_per_octave=B)
c = cqt(x)
# Reconstruct x.
icqt = diffsptk.ICQT(fp, sr, n_bin=K, n_bin_per_octave=B)
x_hat = icqt(c, out_length=x.size(0))
# Write reconstructed waveform.
diffsptk.write("reconst.wav", x_hat, sr)
# Compute error.
error = (x_hat - x).abs().sum()
print(error)
```
### Modified discrete cosine transform
```python
import diffsptk
fl = 512 # Frame length.
# Read waveform.
x, sr = diffsptk.read("assets/data.wav")
# Transform x.
mdct = diffsptk.MDCT(fl)
c = mdct(x)
# Reconstruct x.
imdct = diffsptk.IMDCT(fl)
x_hat = imdct(c, out_length=x.size(0))
# Write reconstructed waveform.
diffsptk.write("reconst.wav", x_hat, sr)
# Compute error.
error = (x_hat - x).abs().sum()
print(error)
```
### Vector quantization
```python
import diffsptk
K = 2 # Codebook size.
M = 4 # Order of vector.
# Prepare input.
x = diffsptk.nrand(M)
# Quantize x.
vq = diffsptk.VectorQuantization(M, K)
x_hat, indices, commitment_loss = vq(x)
# Compute error.
error = (x_hat - x).abs().sum()
print(error)
```
## License
This software is released under the Apache License 2.0.
## Citation
```bibtex
@InProceedings{sp-nitech2023sptk,
author = {Takenori Yoshimura and Takato Fujimoto and Keiichiro Oura and Keiichi Tokuda},
title = {{SPTK4}: An open-source software toolkit for speech signal processing},
booktitle = {12th ISCA Speech Synthesis Workshop (SSW 2023)},
pages = {211--217},
year = {2023},
}
```
Raw data
{
"_id": null,
"home_page": null,
"name": "diffsptk",
"maintainer": null,
"docs_url": null,
"requires_python": ">=3.9",
"maintainer_email": "Takenori Yoshimura <takenori@sp.nitech.ac.jp>",
"keywords": "dsp, pytorch, signal processing, sptk",
"author": "SPTK Working Group",
"author_email": null,
"download_url": null,
"platform": null,
"description": "# diffsptk\n\n*diffsptk* is a differentiable version of [SPTK](https://github.com/sp-nitech/SPTK) based on the PyTorch framework.\n\n[![Latest Manual](https://img.shields.io/badge/docs-latest-blue.svg)](https://sp-nitech.github.io/diffsptk/latest/)\n[![Stable Manual](https://img.shields.io/badge/docs-stable-blue.svg)](https://sp-nitech.github.io/diffsptk/2.2.0/)\n[![Downloads](https://static.pepy.tech/badge/diffsptk)](https://pepy.tech/project/diffsptk)\n[![Python Version](https://img.shields.io/pypi/pyversions/diffsptk.svg)](https://pypi.python.org/pypi/diffsptk)\n[![PyTorch Version](https://img.shields.io/badge/pytorch-2.0.0%20%7C%202.4.0-orange.svg)](https://pypi.python.org/pypi/diffsptk)\n[![PyPI Version](https://img.shields.io/pypi/v/diffsptk.svg)](https://pypi.python.org/pypi/diffsptk)\n[![Codecov](https://codecov.io/gh/sp-nitech/diffsptk/branch/master/graph/badge.svg)](https://app.codecov.io/gh/sp-nitech/diffsptk)\n[![License](https://img.shields.io/github/license/sp-nitech/diffsptk.svg)](https://github.com/sp-nitech/diffsptk/blob/master/LICENSE)\n[![GitHub Actions](https://github.com/sp-nitech/diffsptk/workflows/package/badge.svg)](https://github.com/sp-nitech/diffsptk/actions)\n[![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff)\n\n## Requirements\n\n- Python 3.9+\n- PyTorch 2.0.0+\n\n## Documentation\n\n- See [this page](https://sp-nitech.github.io/diffsptk/latest/) for a reference manual.\n- Our [paper](https://www.isca-speech.org/archive/ssw_2023/yoshimura23_ssw.html) is available on the ISCA Archive.\n\n## Installation\n\nThe latest stable release can be installed through PyPI by running\n\n```sh\npip install diffsptk\n```\n\nThe development release can be installed from the master branch:\n\n```sh\npip install git+https://github.com/sp-nitech/diffsptk.git@master\n```\n\n## Examples\n\n### Mel-cepstral analysis and 
synthesis\n\n```python\nimport diffsptk\n\nfl = 400 # Frame length.\nfp = 80 # Frame period.\nn_fft = 512 # FFT length.\nM = 24 # Mel-cepstrum dimensions.\n\n# Read waveform.\nx, sr = diffsptk.read(\"assets/data.wav\")\n\n# Compute STFT amplitude of x.\nstft = diffsptk.STFT(frame_length=fl, frame_period=fp, fft_length=n_fft)\nX = stft(x)\n\n# Estimate mel-cepstrum of x.\nalpha = diffsptk.get_alpha(sr)\nmcep = diffsptk.MelCepstralAnalysis(cep_order=M, fft_length=n_fft, alpha=alpha, n_iter=10)\nmc = mcep(X)\n\n# Reconstruct x.\nmlsa = diffsptk.MLSA(filter_order=M, frame_period=fp, alpha=alpha, taylor_order=20)\nx_hat = mlsa(mlsa(x, -mc), mc)\n\n# Write reconstructed waveform.\ndiffsptk.write(\"reconst.wav\", x_hat, sr)\n\n# Compute error.\nerror = (x_hat - x).abs().sum()\nprint(error)\n\n# Extract pitch of x.\npitch = diffsptk.Pitch(frame_period=fp, sample_rate=sr, f_min=80, f_max=180)\np = pitch(x)\n\n# Generate excitation signal.\nexcite = diffsptk.ExcitationGeneration(frame_period=fp)\ne = excite(p)\nn = diffsptk.nrand(x.size(0) - 1)\n\n# Synthesize waveform.\nx_voiced = mlsa(e, mc)\nx_unvoiced = mlsa(n, mc)\n\n# Output analysis-synthesis result.\ndiffsptk.write(\"voiced.wav\", x_voiced, sr)\ndiffsptk.write(\"unvoiced.wav\", x_unvoiced, sr)\n```\n\n### LPC analysis and synthesis\n\n```python\nimport diffsptk\n\nfl = 400 # Frame length.\nfp = 80 # Frame period.\nM = 24 # LPC dimensions.\n\n# Read waveform.\nx, sr = diffsptk.read(\"assets/data.wav\")\n\n# Estimate LPC of x.\nframe = diffsptk.Frame(frame_length=fl, frame_period=fp)\nwindow = diffsptk.Window(in_length=fl)\nlpc = diffsptk.LPC(frame_length=fl, lpc_order=M, eps=1e-6)\na = lpc(window(frame(x)))\n\n# Convert to inverse filter coefficients.\nnorm0 = diffsptk.AllPoleToAllZeroDigitalFilterCoefficients(filter_order=M)\nb = norm0(a)\n\n# Reconstruct x.\nzerodf = diffsptk.AllZeroDigitalFilter(filter_order=M, frame_period=fp)\npoledf = diffsptk.AllPoleDigitalFilter(filter_order=M, frame_period=fp)\nx_hat = 
poledf(zerodf(x, b), a)\n\n# Write reconstructed waveform.\ndiffsptk.write(\"reconst.wav\", x_hat, sr)\n\n# Compute error.\nerror = (x_hat - x).abs().sum()\nprint(error)\n```\n\n### Mel-spectrogram, MFCC, and PLP extraction\n\n```python\nimport diffsptk\n\nfl = 400 # Frame length\nfp = 80 # Frame period\nn_fft = 512 # FFT length\nn_channel = 80 # Number of channels\nM = 12 # MFCC/PLP dimensions\n\n# Read waveform.\nx, sr = diffsptk.read(\"assets/data.wav\")\n\n# Compute STFT amplitude of x.\nstft = diffsptk.STFT(frame_length=fl, frame_period=fp, fft_length=n_fft)\nX = stft(x)\n\n# Extract log mel-spectrogram.\nfbank = diffsptk.MelFilterBankAnalysis(\n n_channel=n_channel,\n fft_length=n_fft,\n sample_rate=sr,\n)\nY = fbank(X)\nprint(Y.shape)\n\n# Extract MFCC.\nmfcc = diffsptk.MFCC(\n mfcc_order=M,\n n_channel=n_channel,\n fft_length=n_fft,\n sample_rate=sr,\n)\nY = mfcc(X)\nprint(Y.shape)\n\n# Extract PLP.\nplp = diffsptk.PLP(\n plp_order=M,\n n_channel=n_channel,\n fft_length=n_fft,\n sample_rate=sr,\n)\nY = plp(X)\nprint(Y.shape)\n```\n\n### Subband decomposition\n\n```python\nimport diffsptk\n\nK = 4 # Number of subbands.\nM = 40 # Order of filter.\n\n# Read waveform.\nx, sr = diffsptk.read(\"assets/data.wav\")\n\n# Decompose x.\npqmf = diffsptk.PQMF(K, M)\ndecimate = diffsptk.Decimation(K)\ny = decimate(pqmf(x))\n\n# Reconstruct x.\ninterpolate = diffsptk.Interpolation(K)\nipqmf = diffsptk.IPQMF(K, M)\nx_hat = ipqmf(interpolate(K * y)).reshape(-1)\n\n# Write reconstructed waveform.\ndiffsptk.write(\"reconst.wav\", x_hat, sr)\n\n# Compute error.\nerror = (x_hat - x).abs().sum()\nprint(error)\n```\n\n### Constant-Q transform\n\n```python\nimport diffsptk\nimport librosa # This is to get sample audio.\n\nfp = 128 # Frame period.\nK = 252 # Number of CQ-bins.\nB = 36 # Number of bins per octave.\n\n# Read waveform.\nx, sr = diffsptk.read(librosa.ex(\"trumpet\"))\n\n# Transform x.\ncqt = diffsptk.CQT(fp, sr, n_bin=K, n_bin_per_octave=B)\nc = cqt(x)\n\n# Reconstruct 
x.\nicqt = diffsptk.ICQT(fp, sr, n_bin=K, n_bin_per_octave=B)\nx_hat = icqt(c, out_length=x.size(0))\n\n# Write reconstructed waveform.\ndiffsptk.write(\"reconst.wav\", x_hat, sr)\n\n# Compute error.\nerror = (x_hat - x).abs().sum()\nprint(error)\n```\n\n### Modified discrete cosine transform\n\n```python\nimport diffsptk\n\nfl = 512 # Frame length.\n\n# Read waveform.\nx, sr = diffsptk.read(\"assets/data.wav\")\n\n# Transform x.\nmdct = diffsptk.MDCT(fl)\nc = mdct(x)\n\n# Reconstruct x.\nimdct = diffsptk.IMDCT(fl)\nx_hat = imdct(c, out_length=x.size(0))\n\n# Write reconstructed waveform.\ndiffsptk.write(\"reconst.wav\", x_hat, sr)\n\n# Compute error.\nerror = (x_hat - x).abs().sum()\nprint(error)\n```\n\n### Vector quantization\n\n```python\nimport diffsptk\n\nK = 2 # Codebook size.\nM = 4 # Order of vector.\n\n# Prepare input.\nx = diffsptk.nrand(M)\n\n# Quantize x.\nvq = diffsptk.VectorQuantization(M, K)\nx_hat, indices, commitment_loss = vq(x)\n\n# Compute error.\nerror = (x_hat - x).abs().sum()\nprint(error)\n```\n\n## License\n\nThis software is released under the Apache License 2.0.\n\n## Citation\n\n```bibtex\n@InProceedings{sp-nitech2023sptk,\n author = {Takenori Yoshimura and Takato Fujimoto and Keiichiro Oura and Keiichi Tokuda},\n title = {{SPTK4}: An open-source software toolkit for speech signal processing},\n booktitle = {12th ISCA Speech Synthesis Workshop (SSW 2023)},\n pages = {211--217},\n year = {2023},\n}\n```\n",
"bugtrack_url": null,
"license": "Apache 2.0",
"summary": "Speech signal processing modules for machine learning",
"version": "2.2.0",
"project_urls": {
"Documentation": "https://sp-nitech.github.io/diffsptk/latest/",
"Homepage": "https://sp-tk.sourceforge.net/",
"Source": "https://github.com/sp-nitech/diffsptk"
},
"split_keywords": [
"dsp",
" pytorch",
" signal processing",
" sptk"
],
"urls": [
{
"comment_text": "",
"digests": {
"blake2b_256": "9d153ee19b86240860cecacd1dad444819888c6db45db1b77f989bdc0e1d49dd",
"md5": "a954f281ff695644d58924441874891f",
"sha256": "34023bf829667c0810fae0d6ee76918bc97ae6aa44c55160012827b7cb810e4a"
},
"downloads": -1,
"filename": "diffsptk-2.2.0-py3-none-any.whl",
"has_sig": false,
"md5_digest": "a954f281ff695644d58924441874891f",
"packagetype": "bdist_wheel",
"python_version": "py3",
"requires_python": ">=3.9",
"size": 189307,
"upload_time": "2024-08-20T02:11:24",
"upload_time_iso_8601": "2024-08-20T02:11:24.733872Z",
"url": "https://files.pythonhosted.org/packages/9d/15/3ee19b86240860cecacd1dad444819888c6db45db1b77f989bdc0e1d49dd/diffsptk-2.2.0-py3-none-any.whl",
"yanked": false,
"yanked_reason": null
}
],
"upload_time": "2024-08-20 02:11:24",
"github": true,
"gitlab": false,
"bitbucket": false,
"codeberg": false,
"github_user": "sp-nitech",
"github_project": "diffsptk",
"travis_ci": false,
"coveralls": false,
"github_actions": true,
"lcname": "diffsptk"
}