![CI](https://github.com/ywatanabe1989/torch_fn/actions/workflows/pip_install.yml/badge.svg)
![CI](https://github.com/ywatanabe1989/torch_fn/actions/workflows/run_example.yml/badge.svg)
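
`torch_fn` is a decorator for seamless PyTorch calculations (primarily on CUDA) from `numpy.ndarray` and `pd.DataFrame` inputs: array-like arguments are converted to tensors before the wrapped function runs, and results are converted back for non-tensor inputs.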
## Installation
``` bash
$ pip install torch_fn
```
## Usage
``` python
from torch_fn import torch_fn

import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F

@torch_fn
def torch_softmax(*args, **kwargs):
    return F.softmax(*args, **kwargs)

def custom_print(x):
    print(type(x), x)

# Test the decorator with different input types
x = [1, 2, 3]
x_list = x
x_tensor = torch.tensor(x).float()
x_tensor_cuda = torch.tensor(x).float().cuda()
x_array = np.array(x)
x_df = pd.DataFrame({"col1": x})

custom_print(torch_softmax(x_list, dim=-1))
# UserWarning: Converted from <class 'list'> to <class 'torch.Tensor'> (cuda:0)
# <class 'numpy.ndarray'> [0.09003057 0.24472848 0.6652409 ]

custom_print(torch_softmax(x_array, dim=-1))
# UserWarning: Converted from <class 'numpy.ndarray'> to <class 'torch.Tensor'> (cuda:0)
# <class 'numpy.ndarray'> [0.09003057 0.24472848 0.6652409 ]

custom_print(torch_softmax(x_df, dim=-1))
# UserWarning: Converted from <class 'pandas.core.frame.DataFrame'> to <class 'torch.Tensor'> (cuda:0)
# <class 'numpy.ndarray'> [0.09003057 0.24472848 0.6652409 ]

custom_print(torch_softmax(x_tensor, dim=-1))
# <class 'torch.Tensor'> tensor([0.0900, 0.2447, 0.6652])

custom_print(torch_softmax(x_tensor_cuda, dim=-1))
# <class 'torch.Tensor'> tensor([0.0900, 0.2447, 0.6652], device='cuda:0')
```
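
Writing your own decorated function works the same way. Below is a minimal sketch (not from the package's own docs), assuming `torch_fn` behaves as in the example above: array-like inputs are converted to CUDA tensors before the call, and a `numpy.ndarray` is returned when the input was not a tensor. The function name `torch_zscore` is purely illustrative.

``` python
import numpy as np
from torch_fn import torch_fn

@torch_fn
def torch_zscore(x, dim=-1):
    # Inside the wrapped function, x is assumed to already be a torch.Tensor.
    return (x - x.mean(dim=dim, keepdim=True)) / x.std(dim=dim, keepdim=True)

out = torch_zscore(np.array([1.0, 2.0, 3.0]))
print(type(out), out)
# Expected (assuming the conversion behavior shown above):
# <class 'numpy.ndarray'> [-1.  0.  1.]
```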