torchlie


Nametorchlie JSON
Version 0.1.0 PyPI version JSON
download
home_pagehttps://github.com/facebookresearch/theseus/lie
SummaryTorch extension for differentiable Lie groups.
upload_time2023-06-28 01:56:16
maintainer
docs_urlNone
authorMeta Research
requires_python>=3.8
license
keywords lie groups differentiable optimization
VCS
bugtrack_url
requirements No requirements were recorded.
Travis-CI No Travis.
coveralls test coverage No coveralls.
            # torchlie

<p align="center">
    <!-- License -->
    <a href="https://github.com/facebookresearch/theseus/blob/main/torchlie/LICENSE">
        <img src="https://img.shields.io/badge/license-MIT-blue.svg" alt="License" height="20">
    </a>
    <!-- pypi -->
    <a href="https://pypi.org/project/torchlie/">
        <img src="https://img.shields.io/pypi/v/torchlie" alt="pypi"
        height="20">
    </a>
    <!-- Downloads counter -->
    <a href="https://pypi.org/project/torchlie/">
        <img src="https://pepy.tech/badge/torchlie" alt="PyPi Downloads" height="20">
    </a>
    <!-- Python -->
    <a href="https://www.python.org/downloads/release/">
        <img src="https://img.shields.io/badge/python-3.8%20%7C%203.9%20%7C%203.10-blue.svg" alt="Python" height="20">
    </a>
    <!-- Pre-commit -->
    <a href="https://github.com/pre-commit/pre-commit">
        <img src="https://img.shields.io/badge/pre--commit-enabled-green?logo=pre-commit&logoColor=white" alt="pre-commit" height="20">
    </a>
    <!-- Black -->
    <a href="https://github.com/psf/black">
        <img src="https://img.shields.io/badge/code%20style-black-000000.svg" alt="black" height="20">
    </a>
    <!-- PRs -->
    <a href="https://github.com/facebookresearch/theseus/blob/main/CONTRIBUTING.md">
        <img src="https://img.shields.io/badge/PRs-welcome-green.svg" alt="PRs" height="20">
    </a>
</p>

<p align="center">
    <i>A library for differentiable Lie groups</i>
</p>

-----

## Getting Started

### Prerequisites
- We *strongly* recommend you install torchlie in a venv or conda environment with Python 3.8-3.10.
- torchlie requires `torch` installation. To install for your particular CPU/CUDA configuration, follow the instructions in the PyTorch [website](https://pytorch.org/get-started/locally/).

### Installing

- **pypi**
    ```bash
    pip install torchlie
    ```

- #### **From source**
    The simplest way to install torchlie from source is by running the following
    ```bash
    git clone https://github.com/facebookresearch/theseus.git && cd theseus/torchlie
    pip install -e .
    ```
    If you are interested in contributing to torchlie, also install
    ```bash
    pip install -r ../requirements/dev.txt
    pre-commit install
    ```
    and follow the more detailed instructions in [CONTRIBUTING](https://github.com/facebookresearch/theseus/blob/main/CONTRIBUTING.md).


## Example

The example below is also available as a [script](https://github.com/facebookresearch/theseus/blob/main/examples/torchlie_api.py).

```python
import torch

import torchlie as lie
import torchlie.functional as lieF

batch_size = 5

# ### Lie Tensor creation functions
g1 = lie.SE3.rand(batch_size, requires_grad=True)
print(f"Created SE3 tensor with shape {g1.shape}")
g2 = g1.clone()

# Identity element
i1 = lie.SO3.identity(2)
i2 = lie.SE3.identity(2)
print("SO3 identity", i1, i1.shape)
print("SE3 identity", i2, i2.shape)

# Indexing
g1_slice = g1[2:4]
assert g1_slice.shape == (2, 3, 4)
torch.testing.assert_close(g1_slice._t, g1._t[2:4])  # type: ignore
try:
    bad = g1[3, 2]
except NotImplementedError:
    print("INDEXING ERROR: Can only slice the first dimension for now.")

# ## Different constructors
g3_data = lieF.SO3.rand(5, requires_grad=True)  # this is a regular tensor with SO3 data

# Can create from a tensor as long as it's consistent with the desired ltype
g3 = lie.from_tensor(g3_data, lie.SO3)  # keeps grad history
assert g3.grad_fn is not None
try:
    x = lie.from_tensor(torch.zeros(1, 3, 3), lie.SO3)
except ValueError as e:
    print(f"ERROR: {e}")


def is_shared(t1, t2):  # utility to check if memory is shared
    return t1.storage().data_ptr() == t2.storage().data_ptr()


# # Let's check different copy vs no-copy options
# -- lie.SO3() lie.SE3()
g3_leaf = lie.SO3(g3_data)  # creates a leaf tensor and copies data
assert g3_leaf.grad_fn is None
assert not is_shared(g3_leaf, g3_data)

# -- lie.LieTensor() constructor is equivalent to lie.SO3()
g3_leaf_2 = lie.LieTensor(g3_data, lie.SO3)
assert g3_leaf_2.grad_fn is None
assert not is_shared(g3_leaf_2, g3_data)


# -- as_lietensor()
g4 = lie.as_lietensor(g3_data, lie.SO3)
assert is_shared(g3_data, g4)  # shares storage if possible
assert g4.grad_fn is not None  # result is not a leaf tensor
# Calling with a LieTensor returns the same tensor...
g5 = lie.as_lietensor(g3, lie.SO3)
assert g5 is g3
# ... unless dtype or device is different
g5_double = lie.as_lietensor(g3, lie.SO3, dtype=torch.double)
assert g5_double is not g3
assert not is_shared(g5_double, g3)

# -- cast()
g6 = lie.cast(g3_data, lie.SO3)  # alias for as_lietensor()
assert is_shared(g3_data, g6)

# -- LieTensor.new()
g7 = g3.new_lietensor(g3_data)
assert not is_shared(g3_data, g7)  # doesn't share storage
assert g7.grad_fn is None  # creates a leaf

# ### Lie operations
v = torch.randn(batch_size, 6)

# Exponential and logarithmic map
out1 = lie.SE3.exp(v)  # also lie.exp(v, g1.ltype)
print(f"Exp map returns a {type(out1)}.")
out2 = g1.log()  # also lie.log(g1)
print(f"Log map returns a {type(out2)}.")

# Inverse
out1 = g1.inv()  # also lie.inv(g1)

# Compose
# also lie.compose(g1, g2)
out1 = g1.compose(g2)  # type: ignore

# Differentiable jacobians
jacs, out = g1.jcompose(g2)  # type: ignore
print("Jacobians output is a 2-tuple.")
print("    First element is a list of jacobians, one per group argument.")
print(f"    For compose this means length {len(jacs)}.")
print("    The second element of the tuple is the result of the operation itself.")
print(f"    Which for compose is a {type(out).__name__}.")

# Other options:
#   * adj(), hat(), vee(), retract(), local(),
#   * Jacobians: jlog(), jinv(), jexp()

# ### Overridden operators
# Compose
out2 = g1 * g2
torch.testing.assert_close(out1, out2, check_dtype=True)

# Transform from (from local to world coordinate frame)
p = torch.randn(batch_size, 3)
pt1 = g1.transform_from(p)
pt2 = g1 @ p
torch.testing.assert_close(pt1, pt2)

# For convenience, we provide a context to drop all ltype checks, and operate
# on raw tensor data. However, keep in mind that this is prone to error.
# Here is one example of how this works.
with lie.as_euclidean():
    gg1 = torch.sin(g1)
# The above is the same as this next call, but the context might be more convenient
# if one is doing similar hacky stuff on several group objects.
gg2 = torch.sin(g1._t)
torch.testing.assert_close(gg1, gg2)
print("Success: We just did some ops that make no sense for SE3 tensors.")

# ### Lie tensors can also be used as leaf tensors for torch optimizers
g1 = lie.SE3.rand(1, requires_grad=True)
g2 = lie.SE3.rand(1)

opt = torch.optim.Adam([g1], lr=0.1)

for i in range(10):
    opt.zero_grad()
    d = g1.local(g2)
    loss = torch.sum(d**2)
    loss.backward()
    opt.step()
    print(f"Iter {i}. Loss: {loss.item(): .3f}")
```


## Citing torchlie

If you use torchlie in your work, please cite the [paper](https://arxiv.org/abs/2207.09442) with the BibTeX below.

```bibtex
@article{pineda2022theseus,
  title   = {{Theseus: A Library for Differentiable Nonlinear Optimization}},
  author  = {Luis Pineda and Taosha Fan and Maurizio Monge and Shobha Venkataraman and Paloma Sodhi and Ricky TQ Chen and Joseph Ortiz and Daniel DeTone and Austin Wang and Stuart Anderson and Jing Dong and Brandon Amos and Mustafa Mukadam},
  journal = {Advances in Neural Information Processing Systems},
  year    = {2022}
}
```


## License

torchlie is MIT licensed. See the [LICENSE](https://github.com/facebookresearch/theseus/blob/main/torchlie/LICENSE) for details.


## Additional Information

- Join the community on [Github Discussions](https://github.com/facebookresearch/theseus/discussions) for questions and suggestions.
- Use [Github Issues](https://github.com/facebookresearch/theseus/issues/new/choose) for bugs and features.
- See [CONTRIBUTING](https://github.com/facebookresearch/theseus/blob/main/CONTRIBUTING.md) if interested in helping out.

            

Raw data

            {
    "_id": null,
    "home_page": "https://github.com/facebookresearch/theseus/lie",
    "name": "torchlie",
    "maintainer": "",
    "docs_url": null,
    "requires_python": ">=3.8",
    "maintainer_email": "",
    "keywords": "lie groups,differentiable optimization",
    "author": "Meta Research",
    "author_email": "",
    "download_url": "https://files.pythonhosted.org/packages/bc/35/910e7127553455aa3b34cf32e3c6aab289cb4a11d95b16a67cccc9f65bb9/torchlie-0.1.0.tar.gz",
    "platform": null,
    "description": "# torchlie\n\n<p align=\"center\">\n    <!-- License -->\n    <a href=\"https://github.com/facebookresearch/theseus/blob/main/torchlie/LICENSE\">\n        <img src=\"https://img.shields.io/badge/license-MIT-blue.svg\" alt=\"License\" height=\"20\">\n    </a>\n    <!-- pypi -->\n    <a href=\"https://pypi.org/project/torchlie/\">\n        <img src=\"https://img.shields.io/pypi/v/torchlie\" alt=\"pypi\"\n        heigh=\"20\">\n    <!-- Downloads counter -->\n    <a href=\"https://pypi.org/project/torchlie/\">\n        <img src=\"https://pepy.tech/badge/torchlie\" alt=\"PyPi Downloads\" height=\"20\">\n    </a>\n    <!-- Python -->\n    <a href=\"https://www.python.org/downloads/release/\">\n        <img src=\"https://img.shields.io/badge/python-3.8%20%7C%203.9%20%7C%203.10-blue.svg\" alt=\"Python\" height=\"20\">\n    </a>\n    <!-- Pre-commit -->\n    <a href=\"https://github.com/pre-commit/pre-commit\">\n        <img src=\"https://img.shields.io/badge/pre--commit-enabled-green?logo=pre-commit&logoColor=white\" alt=\"pre-commit\" height=\"20\">\n    </a>\n    <!-- Black -->\n    <a href=\"https://github.com/psf/black\">\n        <img src=\"https://img.shields.io/badge/code%20style-black-000000.svg\" alt=\"black\" height=\"20\">\n    </a>\n    <!-- PRs -->\n    <a href=\"https://github.com/facebookresearch/theseus/blob/main/CONTRIBUTING.md\">\n        <img src=\"https://img.shields.io/badge/PRs-welcome-green.svg\" alt=\"PRs\" height=\"20\">\n    </a>\n</p>\n\n<p align=\"center\">\n    <i>A library for differentiable Lie groups</i>\n</p>\n\n-----\n\n## Getting Started\n\n### Prerequisites\n- We *strongly* recommend you install torchlie in a venv or conda environment with Python 3.8-3.10.\n- torchlie requires `torch` installation. 
To install for your particular CPU/CUDA configuration, follow the instructions in the PyTorch [website](https://pytorch.org/get-started/locally/).\n\n### Installing\n\n- **pypi**\n    ```bash\n    pip install torchlie\n    ```\n\n- #### **From source**\n    The simplest way to install torchlie from source is by running the following\n    ```bash\n    git clone https://github.com/facebookresearch/theseus.git && cd theseus/torchlie\n    pip install -e .\n    ```\n    If you are interested in contributing to torchlie, also install\n    ```bash\n    pip install -r ../requirements/dev.txt\n    pre-commit install\n    ```\n    and follow the more detailed instructions in [CONTRIBUTING](https://github.com/facebookresearch/theseus/blob/main/CONTRIBUTING.md).\n\n\n## Example\n\nThe example below is also available as a [script](https://github.com/facebookresearch/theseus/blob/main/examples/torchlie_api.py).\n\n```python\nimport torch\n\nimport torchlie as lie\nimport torchlie.functional as lieF\n\nbatch_size = 5\n\n# ### Lie Tensor creation functions\ng1 = lie.SE3.rand(batch_size, requires_grad=True)\nprint(f\"Created SE3 tensor with shape {g1.shape}\")\ng2 = g1.clone()\n\n# Identity element\ni1 = lie.SO3.identity(2)\ni2 = lie.SE3.identity(2)\nprint(\"SO3 identity\", i1, i1.shape)\nprint(\"SE3 identity\", i2, i2.shape)\n\n# Indexing\ng1_slice = g1[2:4]\nassert g1_slice.shape == (2, 3, 4)\ntorch.testing.assert_close(g1_slice._t, g1._t[2:4])  # type: ignore\ntry:\n    bad = g1[3, 2]\nexcept NotImplementedError:\n    print(\"INDEXING ERROR: Can only slice the first dimension for now.\")\n\n# ## Different constructors\ng3_data = lieF.SO3.rand(5, requires_grad=True)  # this is a regular tensor with SO3 data\n\n# Can create from a tensor as long as it's consistent with the desired ltype\ng3 = lie.from_tensor(g3_data, lie.SO3)  # keeps grad history\nassert g3.grad_fn is not None\ntry:\n    x = lie.from_tensor(torch.zeros(1, 3, 3), lie.SO3)\nexcept ValueError as e:\n    
print(f\"ERROR: {e}\")\n\n\ndef is_shared(t1, t2):  # utility to check if memory is shared\n    return t1.storage().data_ptr() == t2.storage().data_ptr()\n\n\n# # Let's check different copy vs no-copy options\n# -- lie.SO3() lie.SE3()\ng3_leaf = lie.SO3(g3_data)  # creates a leaf tensor and copies data\nassert g3_leaf.grad_fn is None\nassert not is_shared(g3_leaf, g3_data)\n\n# -- lie.LieTensor() constructor is equivalent to lie.SO3()\ng3_leaf_2 = lie.LieTensor(g3_data, lie.SO3)\nassert g3_leaf_2.grad_fn is None\nassert not is_shared(g3_leaf_2, g3_data)\n\n\n# -- as_lietensor()\ng4 = lie.as_lietensor(g3_data, lie.SO3)\nassert is_shared(g3_data, g4)  # shares storage if possible\nassert g4.grad_fn is not None  # result is not a leaf tensor\n# Calling with a LieTensor returns the same tensor...\ng5 = lie.as_lietensor(g3, lie.SO3)\nassert g5 is g3\n# ... unless dtype or device is different\ng5_double = lie.as_lietensor(g3, lie.SO3, dtype=torch.double)\nassert g5_double is not g3\nassert not is_shared(g5_double, g3)\n\n# -- cast()\ng6 = lie.cast(g3_data, lie.SO3)  # alias for as_lietensor()\nassert is_shared(g3_data, g6)\n\n# -- LieTensor.new()\ng7 = g3.new_lietensor(g3_data)\nassert not is_shared(g3_data, g7)  # doesn't share storage\nassert g7.grad_fn is None  # creates a leaf\n\n# ### Lie operations\nv = torch.randn(batch_size, 6)\n\n# Exponential and logarithmic map\nout1 = lie.SE3.exp(v)  # also lie.exp(v, g1.ltype)\nprint(f\"Exp map returns a {type(out1)}.\")\nout2 = g1.log()  # also lie.log(g1)\nprint(f\"Log map returns a {type(out2)}.\")\n\n# Inverse\nout1 = g1.inv()  # also lie.inv(g1)\n\n# Compose\n# also lie.compose(g1, g2)\nout1 = g1.compose(g2)  # type: ignore\n\n# Differentiable jacobians\njacs, out = g1.jcompose(g2)  # type: ignore\nprint(\"Jacobians output is a 2-tuple.\")\nprint(\"    First element is a list of jacobians, one per group argument.\")\nprint(f\"    For compose this means length {len(jacs)}.\")\nprint(\"    The second element of the tuple 
is the result of the operation itself.\")\nprint(f\"    Which for compose is a {type(out).__name__}.\")\n\n# Other options:\n#   * adj(), hat(), vee(), retract(), local(),\n#   * Jacobians: jlog(), jinv(), jexp()\n\n# ### Overriden operators\n# Compose\nout2 = g1 * g2\ntorch.testing.assert_close(out1, out2, check_dtype=True)\n\n# Transfrom from (from local to world coordinate frame)\np = torch.randn(batch_size, 3)\npt1 = g1.transform_from(p)\npt2 = g1 @ p\ntorch.testing.assert_close(pt1, pt2)\n\n# For convenience, we provide a context to drop all ltype checks, and operate\n# on raw tensor data. However, keep in mind that this is prone to error.\n# Here is one example of how this works.\nwith lie.as_euclidean():\n    gg1 = torch.sin(g1)\n# The above is the same as this next call, but the context might be more convenient\n# if one is doing similar hacky stuff on several group objects.\ngg2 = torch.sin(g1._t)\ntorch.testing.assert_close(gg1, gg2)\nprint(\"Success: We just did some ops that make no sense for SE3 tensors.\")\n\n# ### Lie tensors can also be used as leaf tensors for torch optimizers\ng1 = lie.SE3.rand(1, requires_grad=True)\ng2 = lie.SE3.rand(1)\n\nopt = torch.optim.Adam([g1], lr=0.1)\n\nfor i in range(10):\n    opt.zero_grad()\n    d = g1.local(g2)\n    loss = torch.sum(d**2)\n    loss.backward()\n    opt.step()\n    print(f\"Iter {i}. 
Loss: {loss.item(): .3f}\")\n```\n\n\n## Citing torchlie\n\nIf you use torchlie in your work, please cite the [paper](https://arxiv.org/abs/2207.09442) with the BibTeX below.\n\n```bibtex\n@article{pineda2022theseus,\n  title   = {{Theseus: A Library for Differentiable Nonlinear Optimization}},\n  author  = {Luis Pineda and Taosha Fan and Maurizio Monge and Shobha Venkataraman and Paloma Sodhi and Ricky TQ Chen and Joseph Ortiz and Daniel DeTone and Austin Wang and Stuart Anderson and Jing Dong and Brandon Amos and Mustafa Mukadam},\n  journal = {Advances in Neural Information Processing Systems},\n  year    = {2022}\n}\n```\n\n\n## License\n\ntorchlie is MIT licensed. See the [LICENSE](https://github.com/facebookresearch/theseus/blob/main/torchlie/LICENSE) for details.\n\n\n## Additional Information\n\n- Join the community on [Github Discussions](https://github.com/facebookresearch/theseus/discussions) for questions and sugesstions.\n- Use [Github Issues](https://github.com/facebookresearch/theseus/issues/new/choose) for bugs and features.\n- See [CONTRIBUTING](https://github.com/facebookresearch/theseus/blob/main/CONTRIBUTING.md) if interested in helping out.\n",
    "bugtrack_url": null,
    "license": "",
    "summary": "Torch extension for differentiable Lie groups.",
    "version": "0.1.0",
    "project_urls": {
        "Homepage": "https://github.com/facebookresearch/theseus/lie"
    },
    "split_keywords": [
        "lie groups",
        "differentiable optimization"
    ],
    "urls": [
        {
            "comment_text": "",
            "digests": {
                "blake2b_256": "5492a3d4e0f91afbafd331fb7441aea7c94f7a8ee6f3c029e9e6246926ffd380",
                "md5": "11c2a6b002ce16cd926e0ea9f362a1d3",
                "sha256": "a91b6f56ba7f4a5adc276e02638d3fbc13c94ff3afa7b706168bdd6003b5dfac"
            },
            "downloads": -1,
            "filename": "torchlie-0.1.0-py3-none-any.whl",
            "has_sig": false,
            "md5_digest": "11c2a6b002ce16cd926e0ea9f362a1d3",
            "packagetype": "bdist_wheel",
            "python_version": "py3",
            "requires_python": ">=3.8",
            "size": 28957,
            "upload_time": "2023-06-28T01:56:13",
            "upload_time_iso_8601": "2023-06-28T01:56:13.863463Z",
            "url": "https://files.pythonhosted.org/packages/54/92/a3d4e0f91afbafd331fb7441aea7c94f7a8ee6f3c029e9e6246926ffd380/torchlie-0.1.0-py3-none-any.whl",
            "yanked": false,
            "yanked_reason": null
        },
        {
            "comment_text": "",
            "digests": {
                "blake2b_256": "bc35910e7127553455aa3b34cf32e3c6aab289cb4a11d95b16a67cccc9f65bb9",
                "md5": "1cb320ad02dd957a94e00f9438bedcd2",
                "sha256": "7874c4e8ed320b1850a1457260333d4972725627cf79efa04ae4d19259cc2e08"
            },
            "downloads": -1,
            "filename": "torchlie-0.1.0.tar.gz",
            "has_sig": false,
            "md5_digest": "1cb320ad02dd957a94e00f9438bedcd2",
            "packagetype": "sdist",
            "python_version": "source",
            "requires_python": ">=3.8",
            "size": 28138,
            "upload_time": "2023-06-28T01:56:16",
            "upload_time_iso_8601": "2023-06-28T01:56:16.758752Z",
            "url": "https://files.pythonhosted.org/packages/bc/35/910e7127553455aa3b34cf32e3c6aab289cb4a11d95b16a67cccc9f65bb9/torchlie-0.1.0.tar.gz",
            "yanked": false,
            "yanked_reason": null
        }
    ],
    "upload_time": "2023-06-28 01:56:16",
    "github": true,
    "gitlab": false,
    "bitbucket": false,
    "codeberg": false,
    "github_user": "facebookresearch",
    "github_project": "theseus",
    "travis_ci": false,
    "coveralls": false,
    "github_actions": false,
    "circle": true,
    "lcname": "torchlie"
}
        
Elapsed time: 0.45983s