<div align="center">
<img src="https://raw.githubusercontent.com/crybot/potatorch/main/docs/potatorch-banner.png" width="100%" role="img">
**PotaTorch is a lightweight PyTorch framework specifically designed to run on hardware with limited resources.**
______________________________________________________________________
<!-- [](https://pepy.tech/project/potatorch) -->
[PyPI](https://pypi.org/project/potatorch/)
[PyPI version](https://badge.fury.io/py/potatorch)

[License](https://github.com/crybot/potatorch/blob/main/LICENSE)
</div>
### Installation
PotaTorch is published on PyPI; you can install it with pip:
```bash
pip install potatorch
```
or you can install it from source:
```bash
git clone --single-branch -b main https://github.com/crybot/potatorch
pip install -e potatorch
```
______________________________________________________________________
### Minimal Working Example
You can run the following example directly from `examples/mlp.py` if you already have PyTorch installed, or you can run it with Docker through the provided scripts:
```bash
./build.sh && ./run.sh
```
The example trains a feed-forward network on a toy problem:
```python
import torch
from torch import nn
from torch.utils.data import TensorDataset

from potatorch.training import TrainingLoop, make_optimizer
from potatorch.callbacks import ProgressbarCallback

# Fix a seed for TrainingLoop to make non-deterministic operations such as
# shuffling reproducible
SEED = 42
device = 'cuda'

epochs = 100
lr = 1e-4

# Define your model as a pytorch Module
model = nn.Sequential(nn.Linear(1, 128), nn.ReLU(),
        nn.Linear(128, 128), nn.ReLU(),
        nn.Linear(128, 1))

# Create your dataset as a torch.utils.data.Dataset
# (use float inputs: torch.sin and nn.Linear require floating-point tensors)
x = torch.arange(1000, dtype=torch.float32).view(1000, 1)
dataset = TensorDataset(x, torch.sin(x))

# Provide a loss function and an optimizer
loss_fn = torch.nn.MSELoss()
optimizer = make_optimizer(torch.optim.Adam, lr=lr)
# Construct a TrainingLoop object.
# TrainingLoop handles the initialization of dataloaders, dataset splitting,
# shuffling, mixed precision training, etc.
# You can provide callback handles through the `callbacks` argument.
training_loop = TrainingLoop(
        dataset,
        loss_fn,
        optimizer,
        train_p=0.8,
        val_p=0.1,
        test_p=0.1,
        random_split=False,
        batch_size=None,
        shuffle=False,
        device=device,
        num_workers=0,
        seed=SEED,
        val_metrics={'l1': nn.L1Loss(), 'mse': nn.MSELoss()},
        callbacks=[
            ProgressbarCallback(epochs=epochs, width=20),
            ]
        )
# Run the training loop
model = training_loop.run(model, epochs=epochs)
```
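Once `run` returns, the trained network is a plain PyTorch module, so you can use it for inference directly. Below is a minimal sketch using only standard PyTorch (not part of the PotaTorch API), continuing from the example above:
```python
# Evaluate the trained model on a few unseen inputs (standard PyTorch,
# continuing from the example above; not a PotaTorch API)
model.eval()
with torch.no_grad():
    x_new = torch.tensor([[10.0], [20.0]], device=device)
    predictions = model(x_new)
print(predictions.cpu())
```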
______________________________________________________________________
### Automatic Hyperparameter Optimization
PotaTorch provides a basic set of utilities for hyperparameter optimization. You can choose among **grid search**, **random search**, and **Bayesian search**, all provided by `potatorch.optimization.tuning.HyperOptimizer`. The following is a working example of a simple grid search on a toy problem; you can find the full script under `examples/grid_search.py`.
```python
from functools import partial

import torch
from torch import nn
from torch.utils.data import TensorDataset

from potatorch.training import TrainingLoop, make_optimizer
from potatorch.callbacks import ProgressbarCallback
from potatorch.optimization.tuning import HyperOptimizer

def train(dataset, device, config):
    """Your usual training function that runs a TrainingLoop instance."""
    SEED = 42
    # `epochs` is a fixed hyperparameter; it won't change among runs
    epochs = config['epochs']

    # Define your model as a pytorch Module
    model = nn.Sequential(nn.Linear(1, 128), nn.ReLU(),
            nn.Linear(128, 128), nn.ReLU(),
            nn.Linear(128, 1))

    loss_fn = torch.nn.MSELoss()
    # `lr` is a dynamic hyperparameter; it will change among runs
    optimizer = make_optimizer(torch.optim.Adam, lr=config['lr'])

    training_loop = TrainingLoop(
            dataset,
            loss_fn,
            optimizer,
            train_p=0.8,
            val_p=0.1,
            test_p=0.1,
            random_split=False,
            batch_size=None,
            shuffle=False,
            device=device,
            num_workers=0,
            seed=SEED,
            val_metrics={'l1': nn.L1Loss(), 'mse': nn.MSELoss()},
            callbacks=[
                ProgressbarCallback(epochs=epochs, width=20),
                ]
            )
    model = training_loop.run(model, epochs=epochs, verbose=1)
    # Return a dictionary containing the training and validation metrics 
    # calculated during the last epoch of the loop
    return training_loop.get_last_metrics()

# Define your search configuration
search_config = {
        'method': 'grid',   # which search method to use: ['grid', 'bayes', 'random']
        'metric': {
            'name': 'val_loss', # the metric you're optimizing
            'goal': 'minimize'  # whether you want to minimize or maximize it
        },
        'parameters': { # the set of hyperparameters you want to optimize
            'lr': {
                'values': [1e-2, 1e-3, 1e-4]    # a range of values for the grid search to try
            }
        },
        'fixed': {      # fixed hyperparameters that won't change among runs
            'epochs': 200
        }
    }

def main():
    device = 'cuda'
    # Use float inputs: torch.sin and nn.Linear require floating-point tensors
    x = torch.arange(1000, dtype=torch.float32).view(1000, 1)
    dataset = TensorDataset(x, torch.sin(x))
    # Partially apply `dataset` and `device` so the score function has
    # the signature f(config) -> metrics dict
    score_function = partial(train, dataset, device)
    # Construct the hyperparameters optimizer
    hyperoptimizer = HyperOptimizer(search_config)
    # Run the optimization over the hyperparameters space
    config, error = hyperoptimizer.optimize(score_function, return_error=True)
    print('Best configuration found: {}\n with error: {}'.format(config, error))

if __name__ == '__main__':
    main()
```
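Switching strategies should only require changing the `method` field. The sketch below, with `search_config` and `score_function` from the example above in scope, assumes that `HyperOptimizer` accepts the same configuration schema (discrete `values` lists) for random search; check `potatorch.optimization.tuning` for the exact keys each method supports:
```python
# Hedged sketch: reuse the grid-search configuration for random search by
# overriding `method`. Assumes the same schema is accepted for 'random';
# consult potatorch.optimization.tuning for the supported keys per method.
random_config = dict(search_config, method='random')

hyperoptimizer = HyperOptimizer(random_config)
config, error = hyperoptimizer.optimize(score_function, return_error=True)
print('Random search best configuration: {} with error: {}'.format(config, error))
```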
            
         