# EvoAug2
EvoAug2 is a PyTorch package to pretrain sequence-based deep learning models for regulatory genomics with evolution-inspired data augmentations, followed by fine-tuning on the original, unperturbed data. The new version replaces the prior model-wrapper (`RobustModel`) with a loader-first design (`RobustLoader`) that applies augmentations on-the-fly within a drop-in `DataLoader`.
All augmentations are length-preserving: an input batch of shape (N, A, L) always yields an output with exactly the same shape.
For questions, email: koo@cshl.edu
<img src="fig/augmentations.png" alt="fig" width="500"/>
<img src="fig/overview.png" alt="overview" width="500"/>
## Install
```bash
pip install evoaug2
```
## Installation Options
### **Option 1: Install from PyPI (Recommended)**
```bash
# Install the latest stable release
pip install evoaug2
# Install a specific version
pip install evoaug2==2.0.2
# Install with optional dependencies for examples
pip install evoaug2[examples]
# Install with all optional dependencies
pip install evoaug2[full]
```
### **Option 2: Install from Source (Development)**
```bash
# Clone the repository
git clone https://github.com/aduranu/evoaug.git
cd evoaug2
# Install in development mode
pip install -e .
# Or install with development dependencies
pip install -e .[dev]
```
### **Option 3: Install with Conda/Mamba**
```bash
# Create a new environment (recommended)
conda create -n evoaug2 python=3.8
conda activate evoaug2
# Install PyTorch first (choose appropriate version)
conda install pytorch pytorch-cuda=11.8 -c pytorch -c nvidia
# Install EvoAug2
pip install evoaug2
```
## Dependencies
```text
torch >= 1.9.0
pytorch-lightning >= 1.5.0
numpy >= 1.20.0
scipy >= 1.7.0
h5py >= 3.1.0
scikit-learn >= 1.0.0
```
Note: The examples use `pytorch_lightning` (imported as `import pytorch_lightning as pl`). If you use the newer `lightning.pytorch` package, adapt the `Trainer` import and arguments accordingly.
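For reference, a minimal sketch of the adapted import, assuming the unified `lightning` package (version 2.0 or later) is installed instead of standalone `pytorch_lightning`:
```python
# If you use the newer unified package instead of pytorch_lightning:
import lightning.pytorch as pl  # replaces `import pytorch_lightning as pl`

# The DataModule and Trainer calls in the examples below then read the same, e.g.
trainer = pl.Trainer(max_epochs=100, accelerator="auto", devices="auto")
```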
## Quick Start
```python
# Install the package first: pip install evoaug2

# Import and use
from evoaug import evoaug, augment
from utils import utils  # repo utilities (e.g., H5Dataset and evaluation tools)

# Create augmentations
augment_list = [
    augment.RandomDeletion(delete_min=0, delete_max=20),
    augment.RandomRC(rc_prob=0.5),
    augment.RandomMutation(mut_frac=0.05),
]

# Create a RobustLoader
loader = evoaug.RobustLoader(
    base_dataset=your_dataset,
    augment_list=augment_list,
    max_augs_per_seq=2,
    hard_aug=True,
    batch_size=32,
)

# Use in training
for x, y in loader:
    # x has shape (N, A, L) with augmentations applied
    # Your training code here
    pass
```
## Use Cases
EvoAug2 provides two main usage patterns, both demonstrated in the included example scripts:
### **Use Case 1: PyTorch Lightning DataModule (Recommended)**
The `example_lightning_module.py` script demonstrates the complete two-stage training workflow:
```python
from evoaug.evoaug import RobustLoader
from evoaug import augment
import pytorch_lightning as pl

# Define augmentations
augment_list = [
    augment.RandomTranslocation(shift_min=0, shift_max=20),
    augment.RandomRC(rc_prob=0.0),
    augment.RandomMutation(mut_frac=0.05),
    augment.RandomNoise(noise_mean=0.0, noise_std=0.3),
]

# Create Lightning DataModule with augmentations
class AugmentedDataModule(pl.LightningDataModule):
    def __init__(self, base_dataset, augment_list, max_augs_per_seq, hard_aug):
        super().__init__()
        self.base_dataset = base_dataset
        self.augment_list = augment_list
        self.max_augs_per_seq = max_augs_per_seq
        self.hard_aug = hard_aug

    def train_dataloader(self):
        # Training with augmentations
        train_dataset = self.base_dataset.get_train_dataset()
        return RobustLoader(
            base_dataset=train_dataset,
            augment_list=self.augment_list,
            max_augs_per_seq=self.max_augs_per_seq,
            hard_aug=self.hard_aug,
            batch_size=self.base_dataset.batch_size,
            shuffle=True,
        )

    def val_dataloader(self):
        # Validation without augmentations
        val_dataset = self.base_dataset.get_val_dataset()
        loader = RobustLoader(
            base_dataset=val_dataset,
            augment_list=self.augment_list,
            max_augs_per_seq=self.max_augs_per_seq,
            hard_aug=self.hard_aug,
            batch_size=self.base_dataset.batch_size,
            shuffle=False,
        )
        loader.disable_augmentations()  # No augs for validation
        return loader

# Two-stage training workflow
# Stage 1: Train with augmentations
data_module = AugmentedDataModule(base_dataset, augment_list, max_augs_per_seq=2, hard_aug=True)
trainer = pl.Trainer(max_epochs=100, accelerator='auto', devices='auto')
trainer.fit(model, datamodule=data_module)

# Stage 2: Fine-tune on original data
class FineTuneDataModule(pl.LightningDataModule):
    def __init__(self, base_dataset):
        super().__init__()
        self.base_dataset = base_dataset

    def train_dataloader(self):
        return self.base_dataset.train_dataloader()

    def val_dataloader(self):
        return self.base_dataset.val_dataloader()

finetune_dm = FineTuneDataModule(base_dataset)
trainer_finetune = pl.Trainer(max_epochs=5, accelerator='auto', devices='auto')
trainer_finetune.fit(model_finetune, datamodule=finetune_dm)
```
**Key Features:**
- Automatic checkpoint management and resuming
- Comprehensive performance comparison plots
- Two-stage training: augmentations → fine-tuning
- Control model training for baseline comparison (see the sketch below)
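A control baseline is a fresh model of the same architecture trained on the original data only. A minimal sketch, reusing `FineTuneDataModule` and `base_dataset` from the example above; `YourModel` stands in for whatever LightningModule you use and is not part of the EvoAug2 API:

```python
# Control model: same architecture, fresh weights, no augmentations at any stage
control_model = YourModel()  # placeholder for your LightningModule
trainer_control = pl.Trainer(max_epochs=100, accelerator='auto', devices='auto')
trainer_control.fit(control_model, datamodule=FineTuneDataModule(base_dataset))
```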
### **Use Case 2: Vanilla PyTorch Training Loop**
The `example_vanilla_pytorch.py` script shows direct usage without Lightning:
```python
from evoaug.evoaug import RobustLoader
from evoaug import augment
import torch
import torch.nn as nn

# Create augmentations
augment_list = [
    augment.RandomTranslocation(shift_min=0, shift_max=20),
    augment.RandomRC(rc_prob=0.0),
    augment.RandomMutation(mut_frac=0.05),
    augment.RandomNoise(noise_mean=0.0, noise_std=0.3),
]

# Create RobustLoader
train_loader = RobustLoader(
    base_dataset=base_dataset,
    augment_list=augment_list,
    max_augs_per_seq=2,
    hard_aug=True,
    batch_size=128,
    shuffle=True,
    num_workers=4,
)

# Training loop
device = "cuda" if torch.cuda.is_available() else "cpu"
num_epochs = 100
model = Model(...).to(device)  # your model architecture
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

for epoch in range(num_epochs):
    model.train()
    for x, y in train_loader:
        x, y = x.to(device), y.to(device)
        optimizer.zero_grad()
        y_hat = model(x)
        loss = criterion(y_hat, y)
        loss.backward()
        optimizer.step()
```
**Key Features:**
- Minimal dependencies (no Lightning required)
- Simple CNN architecture with global average pooling
- Direct control over training loop
- Easy to modify and extend
## Troubleshooting
### **Common Issues**
**Import Error: No module named 'evoaug'**
```bash
# Make sure you installed the correct package name
pip install evoaug2 # NOT evoaug
```
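The PyPI distribution is named `evoaug2`, but the import name is `evoaug`; a quick check that the install worked:
```python
# The import path is `evoaug`, even though you install `evoaug2`
from evoaug import evoaug, augment
print(evoaug.RobustLoader)  # should print the class if the install succeeded
```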
**CUDA/GPU Issues**
```bash
# Install PyTorch with CUDA support first
pip install torch torchvision --index-url https://download.pytorch.org/whl/cu118
# Then install EvoAug2
pip install evoaug2
```
**Version Conflicts**
```bash
# Create a clean environment
conda create -n evoaug2 python=3.8
conda activate evoaug2
pip install evoaug2
```
**Memory Issues with Large Datasets**
```python
# Reduce batch size or use gradient accumulation
loader = evoaug.RobustLoader(
    base_dataset=dataset,
    augment_list=augment_list,
    batch_size=16,   # Reduce from 32
    num_workers=2,   # Reduce workers if needed
)
```
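Gradient accumulation keeps the effective batch size while lowering per-step memory. A minimal sketch, reusing `model`, `criterion`, `optimizer`, and `device` from the vanilla-PyTorch example above; `accum_steps` is an illustrative name, not an EvoAug2 option:
```python
accum_steps = 4  # effective batch size = batch_size * accum_steps
optimizer.zero_grad()
for i, (x, y) in enumerate(loader):
    x, y = x.to(device), y.to(device)
    loss = criterion(model(x), y) / accum_steps  # scale so accumulated gradients average
    loss.backward()
    if (i + 1) % accum_steps == 0:
        optimizer.step()
        optimizer.zero_grad()
```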
### **Getting Help**
- **GitHub Issues**: Report bugs at https://github.com/aduranu/evoaug/issues
- **Email**: koo@cshl.edu
- **Documentation**: See example scripts for complete usage examples
## Package Structure
```
evoaug2/
├── evoaug/                        # Core augmentation package
│   ├── __init__.py                # Package exports
│   ├── augment.py                 # Augmentation implementations
│   └── evoaug.py                  # RobustLoader and dataset classes
├── utils/                         # Utility functions
│   ├── __init__.py                # Utility exports
│   ├── model_zoo.py               # Model architectures
│   └── utils.py                   # H5Dataset and evaluation tools
├── example_lightning_module.py    # Complete Lightning training example
├── example_vanilla_pytorch.py     # Simple PyTorch training example
├── setup.py                       # Package configuration
├── pyproject.toml                 # Modern Python packaging
├── requirements.txt               # Core dependencies
└── README.md                      # This file
```
## What changed (RobustModel → RobustLoader)
- The training wrapper is no longer required. Instead of wrapping a model in `RobustModel`, EvoAug2 provides a `RobustLoader` that augments data during loading.
- Works with any PyTorch model and any dataset that returns `(sequence, target)` pairs with `sequence` shaped (A, L).
- Augmentations can be toggled per-loader: `loader.enable_augmentations()` / `loader.disable_augmentations()`.
- Fine-tuning stage is implemented by disabling augmentations on the same dataset/loader.
Quick migration (a minimal sketch follows the list):
- Before: wrap model with `evoaug.RobustModel(...)` and pass a normal DataLoader.
- Now: create a `RobustLoader(base_dataset, augment_list, ...)` and pass the loader to your Trainer or training loop.
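A minimal before/after sketch, assuming `model`, `train_dataset`, `augment_list`, and `trainer` as in the examples above; the v1 wrapper arguments are illustrative, so check the EvoAug v1 documentation for its exact signature:

```python
# Before (EvoAug v1): the model was wrapped and fed a normal DataLoader.
# robust_model = evoaug.RobustModel(model, augment_list=augment_list,
#                                   max_augs_per_seq=2, hard_aug=True)
# trainer.fit(robust_model, train_dataloaders=plain_loader)

# Now (EvoAug2): the model is untouched; the loader applies the augmentations.
train_loader = RobustLoader(
    base_dataset=train_dataset,
    augment_list=augment_list,
    max_augs_per_seq=2,
    hard_aug=True,
    batch_size=128,
    shuffle=True,
)
trainer.fit(model, train_dataloaders=train_loader)
```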
## Augmentations
```python
from evoaug import augment
augment_list = [
    augment.RandomDeletion(delete_min=0, delete_max=30),
    augment.RandomTranslocation(shift_min=0, shift_max=20),
    augment.RandomInsertion(insert_min=0, insert_max=20),
    augment.RandomRC(rc_prob=0.0),
    augment.RandomMutation(mut_frac=0.05),
    augment.RandomNoise(noise_mean=0.0, noise_std=0.3),
]
All transforms keep sequence length exactly L and operate on batches shaped (N, A, L).
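As a quick sanity check of the length-preserving claim, a minimal sketch applying one augmentation to a random one-hot batch; it assumes each augmentation object can be called directly on an (N, A, L) tensor, as `RobustLoader` does internally:
```python
import torch
from evoaug import augment

# Random one-hot batch: N=8 sequences, A=4 alphabet, L=200 positions
x = torch.eye(4)[torch.randint(0, 4, (8, 200))].permute(0, 2, 1)  # (8, 4, 200)
aug = augment.RandomDeletion(delete_min=0, delete_max=30)
x_aug = aug(x)
assert x_aug.shape == x.shape  # length L is preserved
```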
## Two-stage workflow (recommended)
1. **Pretrain** with EvoAug2 augmentations using `RobustLoader` (e.g., 100 epochs).
2. **Fine-tune** the same architecture on original data with augmentations disabled (e.g., 5 epochs, lower LR).
3. **Optionally**, train a control model on original data only for baseline comparison.
This mirrors the EvoAug methodology and typically improves robustness and generalization.
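For step 2, the same loader can be reused with augmentations switched off. A minimal sketch of the toggle for the vanilla-PyTorch setup above; the lower learning rate and epoch count are example values:
```python
# Reuse the same loader, now serving the original, unperturbed data
train_loader.disable_augmentations()
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)  # lower LR for fine-tuning

for epoch in range(5):  # e.g., 5 fine-tuning epochs
    model.train()
    for x, y in train_loader:
        x, y = x.to(device), y.to(device)
        optimizer.zero_grad()
        loss = criterion(model(x), y)
        loss.backward()
        optimizer.step()

# train_loader.enable_augmentations() switches augmentations back on if needed
```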
## Reference
- Paper: "EvoAug: improving generalization and interpretability of genomic deep neural networks with evolution-inspired data augmentations" (Genome Biology, 2023).
```bibtex
@article{lee2023evoaug,
title={EvoAug: improving generalization and interpretability of genomic deep neural networks with evolution-inspired data augmentations},
author={Lee, Nicholas Keone and Tang, Ziqi and Toneyan, Shushan and Koo, Peter K},
journal={Genome Biology},
volume={24},
number={1},
pages={105},
year={2023},
publisher={Springer}
}
```