# SQLTrack

SQLTrack is a set of tools to track your (machine learning)
experiments.
While using other tools like
[Sacred](https://github.com/IDSIA/sacred)
or
[mlflow tracking](https://mlflow.org/docs/latest/tracking.html),
we found that they limited how we could track our
experiments and, later, what analyses we could perform.
We realized that it is ultimately futile for library
authors to guess how experiment data will be used:
a library that catered to every single use case
would become too bloated to use.

That is why our goal is to collect a wide variety of examples
for analyses and visualizations to empower our users,
instead of providing complex functionality in our package.

To that end, SQLTrack only provides a basic schema of
experiments, runs, and metrics that you can extend to suit
your needs, as well as some basic tools to set up the
database and store experiment data.



## Getting started

Currently SQLTrack supports PostgreSQL through the psycopg driver.
We don't plan on adding support for any other databases,
except SQLite if there is demand for it.
We've tried using ORMs, but found that they made things far more
complicated than they needed to be and, most importantly,
obfuscated the DB schema from users.
Ideally we would use standard SQL and let users bring their own
Python DB-API 2.0 compatible driver, but that would mean
losing access to advanced features like indexable JSONB columns.
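
Indexable JSONB columns are worth a quick illustration.
A minimal sketch, assuming a GIN index you create yourself
(the index name and tag key are made up; the base schema below
defines the `tags` columns, but not this index):

```SQL
-- Hypothetical: a GIN index on runs.tags lets PostgreSQL answer
-- JSONB containment queries without scanning every row.
CREATE INDEX runs_tags_gin ON runs USING GIN (tags);

-- Find all runs tagged with a particular dataset.
SELECT id, time_created
FROM runs
WHERE tags @> '{"dataset": "imagenet"}';
```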



### Installation

SQLTrack can be installed like any other Python package,
e.g., `pip install sqltrack`.
By default only core dependencies are installed,
which speeds up usage in containerized environments.
The core functionality in the top-level package
`sqltrack` lets you track experiments and work
with the database.
To use the convenience functions for analysis later,
install the full package with `pip install sqltrack[full]`.

On Linux, your distribution's repositories should include
a version of PostgreSQL you can use.
We develop against PostgreSQL 13, but any currently supported
version should work.
There are also install instructions for
[macOS](https://www.postgresql.org/download/macosx/)
and
[Windows](https://www.postgresql.org/download/windows/).



### Base schema

This is the basic schema SQLTrack defines (minus some details like indexes),
with tables `experiments`, `experiment_links`, `runs`,
`run_links`, and `metrics`.

`runs.status` has the custom enum type `runstatus`.
It behaves like text when used with the psycopg driver.
The possible values are lifted from Slurm job states.

```SQL
BEGIN;

CREATE TABLE experiments (
    id BIGINT GENERATED BY DEFAULT AS IDENTITY,
    time_created TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
    name TEXT NOT NULL,
    comment TEXT,
    tags JSONB,
    PRIMARY KEY (id),
    UNIQUE (name)
);

CREATE TABLE experiment_links (
    from_id BIGINT NOT NULL,
    kind TEXT NOT NULL,
    to_id BIGINT NOT NULL,
    PRIMARY KEY (from_id, kind, to_id),
    FOREIGN KEY (from_id) REFERENCES experiments(id),
    FOREIGN KEY (to_id) REFERENCES experiments(id)
);

CREATE TYPE runstatus AS ENUM (
    'BOOT_FAIL',
    'CANCELLED',
    'CONFIGURING',
    'COMPLETED',
    'COMPLETING',
    'DEADLINE',
    'FAILED',
    'NODE_FAIL',
    'OUT_OF_MEMORY',
    'PENDING',
    'PREEMPTED',
    'RESV_DEL_HOLD',
    'REQUEUE_FED',
    'REQUEUE_HOLD',
    'REQUEUED',
    'RESIZING',
    'REVOKED',
    'RUNNING',
    'SIGNALING',
    'SPECIAL_EXIT',
    'STAGE_OUT',
    'STOPPED',
    'SUSPENDED',
    'TIMEOUT'
);

CREATE TABLE runs (
    id BIGINT GENERATED BY DEFAULT AS IDENTITY,
    experiment_id BIGINT NOT NULL,
    status runstatus NOT NULL DEFAULT 'PENDING',
    time_created TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
    time_started TIMESTAMP WITH TIME ZONE,
    time_updated TIMESTAMP WITH TIME ZONE,
    comment TEXT,
    tags JSONB,
    args JSONB,
    env JSONB,
    PRIMARY KEY (id),
    FOREIGN KEY (experiment_id) REFERENCES experiments(id) ON DELETE CASCADE
);

CREATE TABLE run_links (
    from_id BIGINT NOT NULL,
    kind TEXT NOT NULL,
    to_id BIGINT NOT NULL,
    PRIMARY KEY (from_id, kind, to_id),
    FOREIGN KEY (from_id) REFERENCES runs(id) ON DELETE CASCADE,
    FOREIGN KEY (to_id) REFERENCES runs(id) ON DELETE CASCADE
);

CREATE TABLE metrics (
    run_id BIGINT NOT NULL,  -- same type as runs.id
    step BIGINT NOT NULL DEFAULT 0,
    progress DOUBLE PRECISION NOT NULL DEFAULT 0.0,
    PRIMARY KEY (run_id, step, progress),
    FOREIGN KEY (run_id) REFERENCES runs(id) ON DELETE CASCADE
);

END;
```
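
The `experiment_links` and `run_links` tables let you relate
entries to each other, with a free-form `kind` describing the
relationship. A hypothetical example (the IDs and the
`resumed_from` kind are made up, not a convention SQLTrack defines):

```SQL
-- Record that run 42 picks up where run 41 left off.
INSERT INTO run_links (from_id, kind, to_id)
VALUES (42, 'resumed_from', 41);
```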

Note that the `metrics` table doesn't contain any columns to store metrics yet;
you add these as required.
For example, a script that adds columns for timing, loss, and accuracy in
train, validation, and test phases could look like this:

```SQL
BEGIN;

ALTER TABLE metrics
    ADD COLUMN train_start TIMESTAMP WITH TIME ZONE,
    ADD COLUMN train_end TIMESTAMP WITH TIME ZONE,
    ADD COLUMN train_loss FLOAT,
    ADD COLUMN train_top1 FLOAT,
    ADD COLUMN train_top5 FLOAT,
    ADD COLUMN val_start TIMESTAMP WITH TIME ZONE,
    ADD COLUMN val_end TIMESTAMP WITH TIME ZONE,
    ADD COLUMN val_loss FLOAT,
    ADD COLUMN val_top1 FLOAT,
    ADD COLUMN val_top5 FLOAT,
    ADD COLUMN test_start TIMESTAMP WITH TIME ZONE,
    ADD COLUMN test_end TIMESTAMP WITH TIME ZONE,
    ADD COLUMN test_loss FLOAT,
    ADD COLUMN test_top1 FLOAT,
    ADD COLUMN test_top5 FLOAT;

END;
```
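
With metric columns in place, later analysis is plain SQL.
A small sketch, assuming the columns added above
(the query itself is not part of SQLTrack):

```SQL
-- Best validation top-1 accuracy reached by each run.
SELECT r.id AS run_id, MAX(m.val_top1) AS best_val_top1
FROM runs r
JOIN metrics m ON m.run_id = r.id
GROUP BY r.id
ORDER BY best_val_top1 DESC NULLS LAST;
```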

Now you might ask why we make you add columns for your metrics,
since that might seem annoying and wasteful compared
to a normalized name+value approach like the one `mlflow` uses.
But don't worry, because PostgreSQL is smart.
NULL values aren't actually stored:
each row stores only the values that are not NULL,
plus a bitmap that records which columns those are.
Also, each row has a fixed-size header of ~23 bytes, and `mlflow`
uses one row per metric value.
Since we store many metric values in one row, we can afford
a really large bitmap of NULL values before we
come out worse.
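
To make that concrete with back-of-envelope numbers: logging the
15 metric columns added above for one epoch costs a name+value
layout 15 rows, i.e. roughly 15 × 23 = 345 bytes of row headers
alone, before keys and values. The wide-row layout pays one
23-byte header plus a NULL bitmap of only a few bytes
(one bit per column, rounded up to whole bytes),
no matter how many of the columns are filled in.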

Put your instructions to add metrics columns etc. in a SQL
script file, e.g. `v001.sql`, for use later.
Add `v002.sql` etc. to update your schema.



### Set up the database

SQLTrack provides a simple tool to set up your database.

```
usage: sqltrack [-h] [-u USER] [-a HOST] [-d DATABASE] [-s SCHEMA] [-c CONFIG_PATH] {setup} ...

positional arguments:
  {setup}               Available commands.
    setup               Setup (and update) the database.

options:
  -h, --help            show this help message and exit
  -u USER, --user USER  username
  -a HOST, --host HOST  DB host (and port)
  -d DATABASE, --database DATABASE
                        database name
  -s SCHEMA, --schema SCHEMA
                        schema name
  -c CONFIG_PATH, --config-path CONFIG_PATH
                        path to config file
```

Parameters given on the command line (user, host, database, and schema)
take priority, but you can also set them via
environment variables named `SQLTRACK_DSN_<PARAM>`.
More info on available parameters can be found
[here](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS).
Most convenient, though, is probably to store them in a config file.
The default path is `./sqltrack.conf`:

```
user=<USER>
host=<HOST>
database=<DATABASE>
schema=<SCHEMA>
```

Those SQL script files you created earlier?
This is where you use them.
Run the setup command with them, e.g. `sqltrack setup v001.sql`.
This creates the base schema and updates it with your definitions.



### Track an experiment

```Python
from random import random

import sqltrack

def main():
    # Connection settings come from sqltrack.conf or the
    # SQLTRACK_DSN_* environment variables (see above).
    client = sqltrack.Client()
    experiment = sqltrack.Experiment(client, name="Very science, much data")
    run = experiment.get_run()
    epochs = 90
    with run.track():
        for epoch in range(epochs):
            metrics = {"train_loss": random(), "train_top1": random()}
            run.add_metrics(step=epoch, progress=epoch / epochs, **metrics)

if __name__ == "__main__":
    main()
```
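
Once a run is tracked, its data is ordinary SQL rows.
A sketch of reading the metrics back, assuming the metric
columns added earlier:

```SQL
-- Fetch the tracked metrics for the example experiment above.
SELECT m.step, m.progress, m.train_loss, m.train_top1
FROM metrics m
JOIN runs r ON r.id = m.run_id
JOIN experiments e ON e.id = r.experiment_id
WHERE e.name = 'Very science, much data'
ORDER BY m.step;
```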



### Analyzing results

This is where it's up to you.
We recommend JupyterLab for interacting with the database,
but plain Jupyter or alternatives like
[Plotly Dash](https://dash.plotly.com/introduction)
work well too.
Look at the examples directory in our repository to get some ideas.
But really, you're the experimenter,
you know best what to do with your data.



### [Optional] Self-signed SSL certificate

You can create a self-signed SSL certificate to use with HTTPS:
```
openssl req -x509 -newkey rsa:4096 -keyout jupyter.key -out jupyter.crt -sha256 -days 365 -nodes
```

Start JupyterLab with your certificate:
```
jupyter-lab [options...] --certfile jupyter.crt --keyfile jupyter.key
```

            
