# SCONCE (Make PyTorch Development and Deployment Efficient)
This is a PyTorch helper package that aims to streamline the workflow of deep learning model development and deployment.
1. The package provides boilerplate definitions that ease the development of torch models.
2. Pruning techniques are imported from the Tomoco package.
3. Model quantization and deployment features are in the development pipeline and will be available soon.
## Package install:
```bash
pip install sconce
```
## Usage:
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, RandomCrop, RandomHorizontalFlip, ToTensor

# Define your network
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 8, 3)
        self.bn1 = nn.BatchNorm2d(8)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(8, 16, 3)
        self.bn2 = nn.BatchNorm2d(16)
        self.fc1 = nn.Linear(16 * 6 * 6, 32)
        self.fc2 = nn.Linear(32, 10)

    def forward(self, x):
        x = self.pool(self.bn1(F.relu(self.conv1(x))))
        x = self.pool(self.bn2(F.relu(self.conv2(x))))
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

# Make a dict of dataloaders, one per split
image_size = 32
transforms = {
    "train": Compose([
        RandomCrop(image_size, padding=4),
        RandomHorizontalFlip(),
        ToTensor(),
    ]),
    "test": ToTensor(),
}
dataset = {}
for split in ["train", "test"]:
    dataset[split] = CIFAR10(
        root="data/cifar10",
        train=(split == "train"),
        download=True,
        transform=transforms[split],
    )
dataloader = {}
for split in ["train", "test"]:
    dataloader[split] = DataLoader(
        dataset[split],
        batch_size=512,
        shuffle=(split == "train"),
        num_workers=0,
        pin_memory=True,
    )

# Make a config of the parameters below
class config:
    criterion = nn.CrossEntropyLoss()
    batch_size = 64
    evaluate = True
    save = False
    goal = 'classification'
    expt_name = 'test-net'
    epochs = 10
    learning_rate = 1e-4
    prune = True
    quantization = True

# Import sconce (the class shares the module's name)
from sconce import sconce

model = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=config.learning_rate)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)

sconce = sconce(model, dataloader, criterion, optimizer, scheduler, config)
dummy_input = torch.randn(1, 3, 32, 32).to('cpu')
sconce.train()
print("Model Latency:", sconce.measure_latency(dummy_input))
```
```
train: 0%| | 0/98 [00:00<?, ?it/s]
Epoch:1 Train Loss: 0.106
Test Accuracy: 26.91 %
train: 0%| | 0/98 [00:00<?, ?it/s]
Epoch:2 Train Loss: 0.098
Test Accuracy: 28.36 %
train: 0%| | 0/98 [00:00<?, ?it/s]
Epoch:3 Train Loss: 0.097
Test Accuracy: 29.83 %
train: 0%| | 0/98 [00:00<?, ?it/s]
Epoch:4 Train Loss: 0.093
Test Accuracy: 34.97 %
train: 0%| | 0/98 [00:00<?, ?it/s]
Epoch:5 Train Loss: 0.088
Test Accuracy: 40.0 %
train: 0%| | 0/98 [00:00<?, ?it/s]
Epoch:6 Train Loss: 0.085
Test Accuracy: 40.97 %
train: 0%| | 0/98 [00:00<?, ?it/s]
Epoch:7 Train Loss: 0.085
Test Accuracy: 41.45 %
train: 0%| | 0/98 [00:00<?, ?it/s]
Epoch:8 Train Loss: 0.083
Test Accuracy: 43.11 %
train: 0%| | 0/98 [00:00<?, ?it/s]
Epoch:9 Train Loss: 0.080
Test Accuracy: 45.15 %
train: 0%| | 0/98 [00:00<?, ?it/s]
Epoch:10 Train Loss: 0.078
Test Accuracy: 46.04 %
Model Latency: 0.2
```
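The example above uses `sconce.measure_latency` as a black box. As a rough sanity check, here is a minimal, hypothetical stand-in (the name `measure_latency_manual` and its defaults are ours, not part of sconce) that times the CPU forward pass directly:

```python
import time
import torch

@torch.no_grad()
def measure_latency_manual(model, dummy_input, n_warmup=10, n_runs=50):
    # Hypothetical helper, not part of sconce: average forward-pass
    # wall-clock time (seconds) over n_runs, after n_warmup untimed passes.
    model.eval()
    for _ in range(n_warmup):
        model(dummy_input)
    start = time.perf_counter()
    for _ in range(n_runs):
        model(dummy_input)
    return (time.perf_counter() - start) / n_runs

print("Manual latency:", measure_latency_manual(model, dummy_input))
```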
### To-Do
- [x] Universal Channel-Wise Pruning
- [x] Update Tutorials
- [+] Fine-Grained Pruning (in progress; see the stock-PyTorch sketch below)
- [ ] Quantization
- [ ] Universal AutoML package
- [ ] Introduction of Sparsification in Pipeline
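Until these land in sconce, the pruning and quantization items can be prototyped with stock PyTorch utilities. A minimal sketch (reusing the `Net` class from the usage example; `torch.nn.utils.prune` and dynamic quantization are stand-ins here, not sconce's API):

```python
import torch
import torch.nn as nn
import torch.nn.utils.prune as prune

model = Net()

# Fine-grained (unstructured) pruning: zero out the 30% smallest-magnitude
# weights of each conv layer, then bake the sparsity into the weight tensor.
for module in model.modules():
    if isinstance(module, nn.Conv2d):
        prune.l1_unstructured(module, name="weight", amount=0.3)
        prune.remove(module, "weight")

# Post-training dynamic quantization of the linear layers for CPU inference.
quantized = torch.quantization.quantize_dynamic(
    model, {nn.Linear}, dtype=torch.qint8
)
```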