glaucus

* Name: glaucus
* Version: 1.1.4
* Summary: Glaucus is a PyTorch complex-valued ML autoencoder & RF estimation Python module.
* Author: Kyle Logue <kyle.logue@aero.org>
* Repository: https://github.com/the-aerospace-corporation/glaucus
* Requires Python: >=3.8
* Keywords: dsp, ml, autoencoder, sigint, rf
* Upload time: 2024-02-12 16:54:44
* Requirements: none recorded

            ![Glaucus Atlanticus](https://upload.wikimedia.org/wikipedia/commons/thumb/2/21/Glaucus_atlanticus_1_cropped.jpg/247px-Glaucus_atlanticus_1_cropped.jpg)

# Glaucus

The Aerospace Corporation is proud to present our complex-valued encoder,
decoder, and a new loss function for radio frequency (RF) digital signal
processing (DSP) in PyTorch.

## Video (click to play)

[<img src="https://i.vimeocdn.com/video/1583946742-851ad3621192f133ca667bc87f4050276e450fcc721f117bbcd93b67cb0535f8-d_1000">](https://vimeo.com/787670661/ce13da4cd9)

## Using

### Install

* via PyPI: `pip install glaucus`
* via source: `pip install .`

### Testing

* `pytest`
* `coverage run`
* `pylint glaucus tests`

### Use pre-trained model with SigMF data

Load the quantized model and return the compressed signal vector & reconstruction.
Our weights were trained & evaluated on a 200 GB corpus of RF waveforms; with
various added RF impairments the effective training set grows to 1 PB.

```python
import torch
import sigmf
from glaucus import GlaucusAE

# create model
model = GlaucusAE(bottleneck_quantize=True, data_format='nl')
model = torch.quantization.prepare(model)
# get weights for quantized model
state_dict = torch.hub.load_state_dict_from_url(
    'https://github.com/the-aerospace-corporation/glaucus/releases/download/v1.1.0/glaucus-512-3275-5517642b.pth',
    map_location='cpu')
model.load_state_dict(state_dict)
# prepare for prediction
model.freeze()
model.eval()
torch.quantization.convert(model, inplace=True)
# get samples into NL tensor
x_sigmf = sigmf.sigmffile.fromfile('example.sigmf')
x_tensor = torch.from_numpy(x_sigmf.read_samples())
# create prediction & quint8 signal vector
y_tensor, y_encoded = model(x_tensor)
# get signal vector as uint8
y_encoded_uint8 = torch.int_repr(y_encoded)
```
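
Not part of the original README: since `y_encoded` comes out of the quantized
bottleneck, a float copy of the embedding can be recovered for downstream search
or clustering with PyTorch's `Tensor.dequantize()`. A minimal sketch, assuming
`y_encoded` is a quantized tensor (as the `torch.int_repr` call above already implies):

```python
# sketch: recover a float32 copy of the quantized embedding for downstream use
# (assumes y_encoded is a quantized tensor, as implied by torch.int_repr above)
y_encoded_float = y_encoded.dequantize()
print(y_encoded_uint8.dtype, y_encoded_float.dtype)  # expect torch.uint8, torch.float32
```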

#### Higher-accuracy pre-trained model

```python
# define architecture
import torch
from glaucus import blockgen, GlaucusAE

encoder_blocks = blockgen(steps=6, spatial_in=4096, spatial_out=16, filters_in=2, filters_out=64, mode='encoder')
decoder_blocks = blockgen(steps=6, spatial_in=16, spatial_out=4096, filters_in=64, filters_out=2, mode='decoder')
# create model
model = GlaucusAE(encoder_blocks, decoder_blocks, bottleneck_in=1024, bottleneck_out=1024, bottleneck_quantize=True, data_format='nl')
model = torch.quantization.prepare(model)
# get weights for quantized model
state_dict = torch.hub.load_state_dict_from_url(
    'https://github.com/the-aerospace-corporation/glaucus/releases/download/v1.1.0/glaucus-1024-761-c49063fd.pth',
    map_location='cpu')
model.load_state_dict(state_dict)
# see above for rest
```

#### Use pre-trained model & discard quantization layers

```python
# create model, but skip quantization
from glaucus.utils import adapt_glaucus_quantized_weights
model = GlaucusAE(bottleneck_quantize=False, data_format='nl')
state_dict = torch.hub.load_state_dict_from_url(
    'https://github.com/the-aerospace-corporation/glaucus/releases/download/v1.1.0/glaucus-512-3275-5517642b.pth',
    map_location='cpu')
state_dict = adapt_glaucus_quantized_weights(state_dict)
# ignore "unexpected_keys" warning
model.load_state_dict(state_dict, strict=False)
# prepare for evaluation mode
model.freeze()
model.eval()
# see above for rest
```
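
The original README defers to the earlier example for the rest; below is a
minimal inference sketch, assuming the default `GlaucusAE` consumes batches of
4096 complex64 samples in `'nl'` (batch, length) layout; adjust the length if
your configuration differs:

```python
# hedged sketch: run the float (non-quantized) model on a dummy batch
# assumes 4096 complex samples per example, matching spatial_in=4096 used above
x_dummy = torch.randn(8, 4096, dtype=torch.complex64)
with torch.no_grad():
    y_recon, y_latent = model(x_dummy)
print(y_recon.shape, y_latent.shape)
```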

### Get loss between two RF signals

```python
import numpy as np
import torch
import glaucus

# create criterion
loss = glaucus.RFLoss(spatial_size=128, data_format='nl')

# create some signal
xxx = torch.randn(128, dtype=torch.complex64)
# alter signal with 1% freq offset
yyy = xxx * np.exp(1j * 2 * np.pi * 0.01 * np.arange(128))

# return loss
loss(xxx, yyy)
```

### Train model with TorchSig

*partially implemented; pending an update or replacement with a notebook example*

```python
import lightning as pl
import torch
import torchsig.datasets
from torch.utils.data import DataLoader
from glaucus import GlaucusAE

batch_size, num_workers = 32, 16
cache_path = './sig53'  # local cache directory for the Sig53 dataset

model = GlaucusAE(data_format='nl')

# this takes a very long time if no cache is available
signal_data = torchsig.datasets.Sig53(root=str(cache_path))
# 80 / 10 / 10 split (fractional lengths require PyTorch >= 1.13)
train_dataset, val_dataset, test_dataset = torch.utils.data.random_split(
    signal_data,
    [0.8, 0.1, 0.1],
    generator=torch.Generator().manual_seed(0xcab005e),
)

class RFDataModule(pl.LightningDataModule):
    '''
    defines the dataloaders for train, val, test and uses datasets
    '''
    def __init__(self, train_dataset=None, val_dataset=None, test_dataset=None,
                 num_workers=16, batch_size=32):
        super().__init__()
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.train_dataset = train_dataset
        self.val_dataset = val_dataset
        self.test_dataset = test_dataset

    def train_dataloader(self):
        return DataLoader(self.train_dataset, num_workers=self.num_workers, batch_size=self.batch_size, shuffle=True, pin_memory=True)
    def val_dataloader(self):
        return DataLoader(self.val_dataset, num_workers=self.num_workers, batch_size=self.batch_size, shuffle=False, pin_memory=True)
    def test_dataloader(self):
        return DataLoader(self.test_dataset, num_workers=self.num_workers, batch_size=self.batch_size, shuffle=False, pin_memory=True)

loader = RFDataModule(
    train_dataset=train_dataset,
    val_dataset=val_dataset,
    test_dataset=test_dataset,
    batch_size=batch_size, num_workers=num_workers)

trainer = pl.Trainer()
trainer.fit(model, loader)

# rewind to best checkpoint (load_from_checkpoint is a classmethod and returns a new model)
model = GlaucusAE.load_from_checkpoint(trainer.checkpoint_callback.best_model_path, strict=False)
```
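
Not in the original README, but a natural follow-up: score the restored model on
the held-out test split with Lightning's `Trainer.test`, reusing the same datamodule.

```python
# evaluate the best checkpoint on the test dataloader defined above
trainer.test(model, datamodule=loader)
```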

## Pre-trained Model List

| desc     | link                                                                                                                                                   | size (MB) | params (M) | multiadds (M) | provenance                                                    |
|----------|--------------------------------------------------------------------------------------------------------------------------------------------------------|-----------|------------|---------------|---------------------------------------------------------------|
| small    | [glaucus-512-3275-5517642b](https://github.com/the-aerospace-corporation/glaucus/releases/download/v1.1.0/glaucus-512-3275-5517642b.pth)               | 8.5       | 2.030      | 259           | 0.009 pfs-days on modulation-only Aerospace DSet              |
| accurate | [glaucus-1024-761-c49063fd](https://github.com/the-aerospace-corporation/glaucus/releases/download/v1.1.0/glaucus-1024-761-c49063fd.pth)               | 11        | 2.873      | 380           | 0.035 pfs-days on modulation & general waveform Aerospace DSet |
| sig53    | [glaucus-1024-sig53TLe37-2956bcb6](https://github.com/the-aerospace-corporation/glaucus/releases/download/v1.1.3/glaucus-1024-sig53TLe37-2956bcb6.pth) | 11        | 2.873      | 380           | transfer learning from glaucus-1024-761-c49063fd with Sig53 DSet |
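
For convenience (not part of the glaucus API), the release URLs above can be
collected into a small lookup so a checkpoint is selected by its table name;
`GLAUCUS_CHECKPOINTS` and `load_pretrained_state` below are illustrative helpers only.

```python
import torch

# illustrative helper, not part of the glaucus package
GLAUCUS_CHECKPOINTS = {
    'small': 'https://github.com/the-aerospace-corporation/glaucus/releases/download/v1.1.0/glaucus-512-3275-5517642b.pth',
    'accurate': 'https://github.com/the-aerospace-corporation/glaucus/releases/download/v1.1.0/glaucus-1024-761-c49063fd.pth',
    'sig53': 'https://github.com/the-aerospace-corporation/glaucus/releases/download/v1.1.3/glaucus-1024-sig53TLe37-2956bcb6.pth',
}

def load_pretrained_state(name: str, map_location: str = 'cpu') -> dict:
    '''Fetch the state dict for one of the published checkpoints by its table name.'''
    return torch.hub.load_state_dict_from_url(GLAUCUS_CHECKPOINTS[name], map_location=map_location)
```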

### Note on pfs-days

Per the [OpenAI appendix](https://openai.com/blog/ai-and-compute/#appendixmethods), here is the correct math (method 1); a short Python sketch follows the list:

* `pfs_days` = (add-multiplies per forward pass) * (2 FLOPs/add-multiply) * (3 for forward and backward pass) * (number of examples in dataset) * (number of epochs) / (flop per petaflop) / (seconds per day)
* (number of examples in dataset) * (number of epochs) = steps * batchsize
* 1 `pfs-day` ≈ (8x V100 GPUs at 100% efficiency for 1 day) ≈ (100x GTX1080s at 100% efficiency for 1 day) ≈ (35x GTX 2080s at 100% efficiency for 1 day) ≈ [500 kWh](https://twitter.com/id_aa_carmack/status/1192513743974019072)
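
Not from the original README: a short Python sketch of the method-1 arithmetic
above. `multiadds_per_pass`, `steps`, and `batch_size` are illustrative
placeholders, not the values used to train the published checkpoints.

```python
def pfs_days(multiadds_per_pass: float, steps: int, batch_size: int) -> float:
    """Method-1 estimate: add-multiplies * 2 FLOPs * 3 (fwd + bwd) * examples seen."""
    total_flops = multiadds_per_pass * 2 * 3 * steps * batch_size
    return total_flops / 1e15 / 86400  # divide by FLOPs per petaFLOP, then seconds per day

# illustrative numbers only, not the actual training budget of any released checkpoint
print(f'{pfs_days(multiadds_per_pass=259e6, steps=1_000_000, batch_size=32):.4f} pfs-days')
```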

## Papers

This code is documented by the following two IEEE publications.

### Glaucus: A Complex-Valued Radio Signal Autoencoder
[![DOI](https://zenodo.org/badge/DOI/10.1109/AERO55745.2023.10115599.svg)](https://doi.org/10.1109/AERO55745.2023.10115599)

A complex-valued autoencoder neural network capable of compressing & denoising radio frequency (RF) signals with arbitrary model scaling is proposed. Complex-valued time samples received with various impairments are decoded into an embedding vector, then encoded back into complex-valued time samples. The embedding and the related latent space allow search, comparison, and clustering of signals. Traditional signal processing tasks like specific emitter identification, geolocation, or ambiguity estimation can utilize multiple compressed embeddings simultaneously. This paper demonstrates an autoencoder implementation capable of 64x compression hardened against RF channel impairments. The autoencoder allows separate or compound scaling of network depth, width, and resolution to target both embedded and data center deployment with differing resources. The common building block is inspired by the Fused Inverted Residual Block (Fused-MBConv), popularized by EfficientNetV2 & MobileNetV3, with kernel sizes more appropriate for time-series signal processing.

### Complex-Valued Radio Signal Loss for Neural Networks
[![DOI](https://zenodo.org/badge/DOI/10.1109/AERO55745.2023.10116006.svg)](https://doi.org/10.1109/AERO55745.2023.10116006)

A new optimized loss for training complex-valued neural networks that require reconstruction of radio signals is proposed. Given a complex-valued time series, this method incorporates loss from spectrograms with multiple aspect ratios, cross-correlation loss, and loss from amplitude envelopes in the time & frequency domains. When training a neural network, an optimizer will observe batch loss and backpropagate this value through the network to determine how to update the model parameters. The proposed loss is robust to typical radio impairments and co-channel interference that would explode a naive mean-squared-error approach. This robust loss enables higher-quality steps along the loss surface, which allows training of models specifically designed for impaired radio input. Loss vs. channel impairment is shown in comparison to mean-squared error for an ensemble of common channel effects.

## Contributing

Do you have code you would like to contribute to this Aerospace project?

We are excited to work with you. We are able to accept small changes
immediately and require a Contributor License Agreement (CLA) for larger
changesets. Generally, documentation and other minor changes of fewer than 10 lines
do not require a CLA. The Aerospace Corporation CLA is based on the well-known
[Harmony Agreements CLA](http://harmonyagreements.org/) created by Canonical,
and protects the rights of The Aerospace Corporation, our customers, and you as
the contributor. [You can find our CLA here](https://aerospace.org/sites/default/files/2020-12/Aerospace-CLA-2020final.pdf).

Please complete the CLA and send us the executed copy. Once a CLA is on file we
can accept pull requests on GitHub or GitLab. If you have any questions, please
e-mail us at [oss@aero.org](mailto:oss@aero.org).

## Licensing

The Aerospace Corporation supports Free & Open Source Software and we publish
our work with GPL-compatible licenses. If the license attached to the project
is not suitable for your needs, our projects are also available under an
alternative license. An alternative license can allow you to create proprietary
applications around Aerospace products without being required to meet the
obligations of the GPL. To inquire about an alternative license, please get in
touch with us at [oss@aero.org](mailto:oss@aero.org).

## To-Do

* allow `pretrained_weights` during model init
* add training notebook and colab example

            
