# cjm-torchvision-tfms
<!-- WARNING: THIS FILE WAS AUTOGENERATED! DO NOT EDIT! -->
## Install
``` sh
pip install cjm_torchvision_tfms
```
## How to use
``` python
from PIL import Image
img_path = './images/call-hand-gesture.png'
# Open the associated image file as an RGB image
sample_img = Image.open(img_path).convert('RGB')
# Print the dimensions of the image
print(f"Image Dims: {sample_img.size}")
# Show the image
sample_img
```
Image Dims: (384, 512)
![](index_files/figure-commonmark/cell-2-output-2.png)
``` python
from cjm_torchvision_tfms.core import ResizeMax, PadSquare, CustomTrivialAugmentWide
import torch
from torchvision import transforms
from cjm_pytorch_utils.core import tensor_to_pil
from cjm_pil_utils.core import stack_imgs
```
``` python
target_sz = 384
```
``` python
print(f"Source image: {sample_img.size}")
# Create a `ResizeMax` object
resize_max = ResizeMax(max_sz=target_sz)
# Convert the sample image to a tensor
img_tensor = transforms.PILToTensor()(sample_img)[None]
print(f"Image tensor: {img_tensor.shape}")
# Resize the tensor
resized_tensor = resize_max(img_tensor)
print(f"Padded tensor: {resized_tensor.shape}")
# Display the updated image
tensor_to_pil(resized_tensor)
```
Source image: (384, 512)
Image tensor: torch.Size([1, 3, 512, 384])
Padded tensor: torch.Size([1, 3, 384, 288])
![](index_files/figure-commonmark/cell-6-output-2.png)
``` python
print(f"Resized tensor: {resized_tensor.shape}")
# Create a `PadSquare` object
pad_square = PadSquare(shift=True)
# Pad the tensor
padded_tensor = pad_square(resized_tensor)
print(f"Padded tensor: {padded_tensor.shape}")
# Display the updated image
stack_imgs([tensor_to_pil(pad_square(resized_tensor)) for i in range(3)])
```
Resized tensor: torch.Size([3, 384, 288])
Padded tensor: torch.Size([3, 384, 384])
![](index_files/figure-commonmark/cell-8-output-2.png)
``` python
num_bins = 31
custom_augmentation_space = {
# Identity operation doesn't change the image
"Identity": (torch.tensor(0.0), False),
# Distort the image along the x or y axis, respectively.
"ShearX": (torch.linspace(0.0, 0.25, num_bins), True),
"ShearY": (torch.linspace(0.0, 0.25, num_bins), True),
# Move the image along the x or y axis, respectively.
"TranslateX": (torch.linspace(0.0, 32.0, num_bins), True),
"TranslateY": (torch.linspace(0.0, 32.0, num_bins), True),
# Rotate operation: rotates the image.
"Rotate": (torch.linspace(0.0, 45.0, num_bins), True),
# Adjust brightness, color, contrast, and sharpness respectively.
"Brightness": (torch.linspace(0.0, 0.75, num_bins), True),
"Color": (torch.linspace(0.0, 0.99, num_bins), True),
"Contrast": (torch.linspace(0.0, 0.99, num_bins), True),
"Sharpness": (torch.linspace(0.0, 0.99, num_bins), True),
# Reduce the number of bits used to express the color in each channel of the image.
"Posterize": (8 - (torch.arange(num_bins) / ((num_bins - 1) / 6)).round().int(), False),
# Invert all pixel values above a threshold.
"Solarize": (torch.linspace(255.0, 0.0, num_bins), False),
# Maximize the image contrast by setting the darkest color to black and the lightest to white.
"AutoContrast": (torch.tensor(0.0), False),
# Equalize the image histogram to improve its contrast.
"Equalize": (torch.tensor(0.0), False),
}
# Create a `CustomTrivialAugmentWide` object
trivial_aug = CustomTrivialAugmentWide(op_meta=custom_augmentation_space)
# Augment the tensor
aug_tensor = trivial_aug(resized_tensor)
print(f"Augmented tensor: {aug_tensor.shape}")
# Display the updated image
stack_imgs([tensor_to_pil(trivial_aug(resized_tensor)) for i in range(3)])
```
Augmented tensor: torch.Size([3, 384, 288])
![](index_files/figure-commonmark/cell-10-output-2.png)
Raw data
{
"_id": null,
"home_page": "https://github.com/cj-mills/cjm-torchvision-tfms",
"name": "cjm-torchvision-tfms",
"maintainer": null,
"docs_url": null,
"requires_python": ">=3.10",
"maintainer_email": null,
"keywords": "nbdev jupyter notebook python",
"author": "Christian Mills",
"author_email": "millscj@protonmail.com",
"download_url": "https://files.pythonhosted.org/packages/4a/35/f0a934fe0a30362e91bd8b95044a739caea3f5c7e73233ea0a97ab73e0a5/cjm_torchvision_tfms-0.0.15.tar.gz",
"platform": null,
"description": "# cjm-torchvision-tfms\n\n\n<!-- WARNING: THIS FILE WAS AUTOGENERATED! DO NOT EDIT! -->\n\n## Install\n\n``` sh\npip install cjm_torchvision_tfms\n```\n\n## How to use\n\n``` python\nfrom PIL import Image\n\nimg_path = './images/call-hand-gesture.png'\n\n# Open the associated image file as a RGB image\nsample_img = Image.open(img_path).convert('RGB')\n\n# Print the dimensions of the image\nprint(f\"Image Dims: {sample_img.size}\")\n\n# Show the image\nsample_img\n```\n\n Image Dims: (384, 512)\n\n![](index_files/figure-commonmark/cell-2-output-2.png)\n\n``` python\nfrom cjm_torchvision_tfms.core import ResizeMax, PadSquare, CustomTrivialAugmentWide\n\nimport torch\nfrom torchvision import transforms\nfrom cjm_pytorch_utils.core import tensor_to_pil\nfrom cjm_pil_utils.core import stack_imgs\n```\n\n``` python\ntarget_sz = 384\n```\n\n``` python\nprint(f\"Source image: {sample_img.size}\")\n\n# Create a `ResizeMax` object\nresize_max = ResizeMax(max_sz=target_sz)\n\n# Convert the cropped image to a tensor\nimg_tensor = transforms.PILToTensor()(sample_img)[None]\nprint(f\"Image tensor: {img_tensor.shape}\")\n\n# Resize the tensor\nresized_tensor = resize_max(img_tensor)\nprint(f\"Padded tensor: {resized_tensor.shape}\")\n\n# Display the updated image\ntensor_to_pil(resized_tensor)\n```\n\n Source image: (384, 512)\n Image tensor: torch.Size([1, 3, 512, 384])\n Padded tensor: torch.Size([1, 3, 384, 288])\n\n![](index_files/figure-commonmark/cell-6-output-2.png)\n\n``` python\nprint(f\"Resized tensor: {resized_tensor.shape}\")\n\n# Create a `PadSquare` object\npad_square = PadSquare(shift=True)\n\n# Pad the tensor\npadded_tensor = pad_square(resized_tensor)\nprint(f\"Padded tensor: {padded_tensor.shape}\")\n\n# Display the updated image\nstack_imgs([tensor_to_pil(pad_square(resized_tensor)) for i in range(3)])\n```\n\n Resized tensor: torch.Size([3, 384, 288])\n Padded tensor: torch.Size([3, 384, 
384])\n\n![](index_files/figure-commonmark/cell-8-output-2.png)\n\n``` python\nnum_bins = 31\n\ncustom_augmentation_space = {\n # Identity operation doesn't change the image\n \"Identity\": (torch.tensor(0.0), False),\n \n # Distort the image along the x or y axis, respectively.\n \"ShearX\": (torch.linspace(0.0, 0.25, num_bins), True),\n \"ShearY\": (torch.linspace(0.0, 0.25, num_bins), True),\n\n # Move the image along the x or y axis, respectively.\n \"TranslateX\": (torch.linspace(0.0, 32.0, num_bins), True),\n \"TranslateY\": (torch.linspace(0.0, 32.0, num_bins), True),\n\n # Rotate operation: rotates the image.\n \"Rotate\": (torch.linspace(0.0, 45.0, num_bins), True),\n\n # Adjust brightness, color, contrast,and sharpness respectively.\n \"Brightness\": (torch.linspace(0.0, 0.75, num_bins), True),\n \"Color\": (torch.linspace(0.0, 0.99, num_bins), True),\n \"Contrast\": (torch.linspace(0.0, 0.99, num_bins), True),\n \"Sharpness\": (torch.linspace(0.0, 0.99, num_bins), True),\n\n # Reduce the number of bits used to express the color in each channel of the image.\n \"Posterize\": (8 - (torch.arange(num_bins) / ((num_bins - 1) / 6)).round().int(), False),\n\n # Invert all pixel values above a threshold.\n \"Solarize\": (torch.linspace(255.0, 0.0, num_bins), False),\n\n # Maximize the image contrast by setting the darkest color to black and the lightest to white.\n \"AutoContrast\": (torch.tensor(0.0), False),\n\n # Equalize the image histogram to improve its contrast.\n \"Equalize\": (torch.tensor(0.0), False),\n}\n\n# Create a `CustomTrivialAugmentWide` object\ntrivial_aug = CustomTrivialAugmentWide(op_meta=custom_augmentation_space)\n\n# Pad the tensor\naug_tensor = trivial_aug(resized_tensor)\nprint(f\"Augmented tensor: {aug_tensor.shape}\")\n\n# Display the updated image\nstack_imgs([tensor_to_pil(trivial_aug(resized_tensor)) for i in range(3)])\n```\n\n Augmented tensor: torch.Size([3, 384, 
288])\n\n![](index_files/figure-commonmark/cell-10-output-2.png)\n",
"bugtrack_url": null,
"license": "Apache Software License 2.0",
"summary": "Some custom Torchvision tranforms.",
"version": "0.0.15",
"project_urls": {
"Homepage": "https://github.com/cj-mills/cjm-torchvision-tfms"
},
"split_keywords": [
"nbdev",
"jupyter",
"notebook",
"python"
],
"urls": [
{
"comment_text": "",
"digests": {
"blake2b_256": "c7aad273b33862a3b72aea822d0d6e65e7bdc71f2cbdce8e293b570579e54926",
"md5": "556b7ec35406bf025b58a3cdb0797744",
"sha256": "a6d1ac67cf8b34c4d8132c1989931574797c4d1017ea7f41703aa3259d75da57"
},
"downloads": -1,
"filename": "cjm_torchvision_tfms-0.0.15-py3-none-any.whl",
"has_sig": false,
"md5_digest": "556b7ec35406bf025b58a3cdb0797744",
"packagetype": "bdist_wheel",
"python_version": "py3",
"requires_python": ">=3.10",
"size": 33573,
"upload_time": "2025-01-21T02:37:11",
"upload_time_iso_8601": "2025-01-21T02:37:11.358639Z",
"url": "https://files.pythonhosted.org/packages/c7/aa/d273b33862a3b72aea822d0d6e65e7bdc71f2cbdce8e293b570579e54926/cjm_torchvision_tfms-0.0.15-py3-none-any.whl",
"yanked": false,
"yanked_reason": null
},
{
"comment_text": "",
"digests": {
"blake2b_256": "4a35f0a934fe0a30362e91bd8b95044a739caea3f5c7e73233ea0a97ab73e0a5",
"md5": "adf6d4325f6a905d56762ef1ad5b89fb",
"sha256": "9fbe6586cf65490486c00ead28fbd9ad525558f7dcde76040b3c548cfaa4f73f"
},
"downloads": -1,
"filename": "cjm_torchvision_tfms-0.0.15.tar.gz",
"has_sig": false,
"md5_digest": "adf6d4325f6a905d56762ef1ad5b89fb",
"packagetype": "sdist",
"python_version": "source",
"requires_python": ">=3.10",
"size": 27803,
"upload_time": "2025-01-21T02:37:12",
"upload_time_iso_8601": "2025-01-21T02:37:12.996803Z",
"url": "https://files.pythonhosted.org/packages/4a/35/f0a934fe0a30362e91bd8b95044a739caea3f5c7e73233ea0a97ab73e0a5/cjm_torchvision_tfms-0.0.15.tar.gz",
"yanked": false,
"yanked_reason": null
}
],
"upload_time": "2025-01-21 02:37:12",
"github": true,
"gitlab": false,
"bitbucket": false,
"codeberg": false,
"github_user": "cj-mills",
"github_project": "cjm-torchvision-tfms",
"travis_ci": false,
"coveralls": false,
"github_actions": true,
"lcname": "cjm-torchvision-tfms"
}