# s3namic

A Python package for managing AWS S3 buckets.

- **Version:** 0.0.5
- **Author:** Joey Kim
- **License:** MIT
- **Homepage:** https://github.com/hyoj0942/s3namic
- **Keywords:** aws, s3, bucket, management
## Installation

`pip install s3namic`

---

## Import module

```python
from s3namic import s3namic
s3 = s3namic(
  bucket="bucket_name",
  access_key="access_key",
  secret_key="secret_key",
  region="region",
)
```
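
Hard-coding credentials in source is best avoided; the same constructor shown above can take them from environment variables instead:

```python
import os

from s3namic import s3namic

# Assumes AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY / AWS_DEFAULT_REGION
# are set in the environment.
s3 = s3namic(
    bucket="bucket_name",
    access_key=os.environ["AWS_ACCESS_KEY_ID"],
    secret_key=os.environ["AWS_SECRET_ACCESS_KEY"],
    region=os.environ.get("AWS_DEFAULT_REGION", "us-east-1"),
)
```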

---

## Check S3 structure in tree form

```python
s3_tree = s3.make_tree(
    # with_file_name=True,  # set to True to include file names in the tree
    )

import json
s3_tree = json.dumps(s3_tree, indent=4, ensure_ascii=False)
print(s3_tree)
```

output:

```python
{
    "assets/": {
        "assets/backup/": {},
        "assets/batch_raw/": {
            "assets/batch_raw/batchData": {}
        },
        ...
}
```
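
The tree is a plain nested dict keyed by prefix, so ordinary dict recursion works on it. For example, a small helper (not part of s3namic) that collects every prefix in the tree:

```python
def flatten_tree(tree: dict) -> list:
    """Recursively collect every key (prefix) from the nested tree dict."""
    paths = []
    for prefix, subtree in tree.items():
        paths.append(prefix)
        paths.extend(flatten_tree(subtree))
    return paths

# e.g. flatten_tree(s3.make_tree()) -> ['assets/', 'assets/backup/', ...]
```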

---

## Check S3 structure in list form

```python
s3_list = s3.list_files()
print(s3_list[:5], "\n...\n", s3_list[-5:])
```

output:

```python
['first_file.json', 'second_file.json', ... ]
```

### Find a specific file in S3

- ### find_file

```python
test_path = s3.find_file(file_name="2023-04-30", str_contains=True)
print(f"File path containing '2023-04-30': '{test_path}'")
```

output:

```python
"2023-04-30 File path containing filename: 'assets/csv/2023-04-30.csv'"
```

- ### find_files

```python
prefix_path = test_path[:-len(test_path.split("/")[-1])]  # strip the file name, keep the prefix
test_files = s3.find_files(prefix=prefix_path)
print(f"Number of files under '{prefix_path}': {len(test_files)}")
```

output:

```python
"Number of files under 'assets/csv/': 112"
```
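
The slicing above strips the file name to recover its prefix; `posixpath.dirname` does the same thing more readably, since S3 keys always use forward slashes:

```python
import posixpath

prefix_path = posixpath.dirname(test_path) + "/"  # 'assets/csv/'
```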

---

### Get a presigned URL for a specific file in S3

```python
print(s3.get_file_url(file_name=test_path, expires_in=3600)) # Expires in 3600 seconds (1 hour)
```

output:

```python
"https://bucket_name.s3.amazonaws.com/assets/csv/test.csv"
```
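
For reference, this matches the presigned URL that boto3 itself generates (assuming s3namic wraps boto3, which its constructor arguments suggest):

```python
import boto3

client = boto3.client("s3", region_name="region")
url = client.generate_presigned_url(
    "get_object",
    Params={"Bucket": "bucket_name", "Key": test_path},
    ExpiresIn=3600,  # seconds until the URL expires
)
```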

---

## CRUD from S3

### C, U (`upload_file`, `_write_file`)

- Upload files to S3.
- `upload_file` reads a file into memory before uploading, while `_write_file` writes content directly without reading it first, so it uses less memory.
- Use `upload_file` to upload a local file to the S3 bucket, and `_write_file` to save a variable from your code directly to S3 (see the sketch below).
- The `write_csv`, `write_json`, `write_pkl`, `write_txt`, and `write_parquet` methods call `_write_file` to save files according to their extension.
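
A minimal sketch of the direct-write idea with plain boto3 and an in-memory buffer (an assumption about the mechanism, not s3namic's actual code; the bucket and key below are placeholders):

```python
import boto3
import pandas as pd

df = pd.DataFrame({"test": [1, 2, 3]})

# Serialize to a string in memory (no temporary file on disk), then write
# the bytes straight to S3 with a single PutObject call.
csv_bytes = df.to_csv(index=False).encode("utf-8")
boto3.client("s3").put_object(Bucket="bucket_name", Key="assets/test/df.csv", Body=csv_bytes)
```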

### R (`_read_file`)

- Read files from S3.
- The `read_csv`, `read_json`, `read_pkl`, `read_txt`, and `read_parquet` methods call the `_read_file` method to read files according to their extension.
- The `read_auto` method picks the right reader from the file extension and calls it (see the dispatch sketch below).
- The `read_thread` method speeds up `read_auto` by running it across multiple threads.
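
A sketch of how such extension dispatch typically looks (a hypothetical helper for illustration, not s3namic internals):

```python
import pandas as pd

# Hypothetical extension-to-reader table; read_auto presumably keeps
# something similar internally.
READERS = {
    "csv": pd.read_csv,
    "json": pd.read_json,
    "parquet": pd.read_parquet,
    "pkl": pd.read_pickle,
}

def read_by_extension(path: str):
    """Dispatch to a pandas reader based on the file extension."""
    ext = path.rsplit(".", 1)[-1].lower()
    if ext not in READERS:
        raise ValueError(f"Unsupported extension: {ext}")
    return READERS[ext](path)
```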

### D (`delete_file`)

- Delete files from S3 (a boto3 equivalent is sketched below).
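
The boto3 equivalent is a single call (assuming s3namic delegates to boto3; the key below is a hypothetical example):

```python
import boto3

# Delete a single object by key
boto3.client("s3").delete_object(Bucket="bucket_name", Key="assets/test/old.csv")
```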

### Examples

- These examples use the CSV extension, but the json, pkl, txt, and parquet extensions work the same way (see the methods above for usage).

```python
import pandas as pd

# Save variable to file (write_csv)
test_write_csv = pd.DataFrame({
    "test": [
        "ν•œκΈ€",
        "English",
        1234,
        "!@#$%^&*()_+",
        "πŸ˜€πŸ‘πŸ‘πŸ»πŸ‘πŸΌ"
    ]
})
# Save the variable (DataFrame) directly to S3
s3.write_csv(file_name="assets/test/test_write.csv", file_content=test_write_csv, encoding="utf-8", index=False)
# Compress and save in gzip or bzip2 format
s3.write_csv(file_name="assets/test/test_write.csv.gz", file_content=test_write_csv, compression="gzip", encoding="utf-8", index=False)
s3.write_csv(file_name="assets/test/test_write.csv.bz2", file_content=test_write_csv, compression="bz2", encoding="utf-8", index=False)
```

```python
# Read the saved file (read_csv)
print(s3.read_csv.__doc__, end="\n====================\n")
pd.concat([
    s3.read_csv(file_name="assets/test/test_write.csv", encoding="utf-8").rename(columns={"test": "Basic format"}),
    # Read compressed files in gzip or bzip2 format
    s3.read_csv(file_name="assets/test/test_write.csv.gz", encoding="utf-8").rename(columns={"test": "gzip format"}),
    s3.read_csv(file_name="assets/test/test_write.csv.bz2", encoding="utf-8").rename(columns={"test": "bzip2 format"})
], axis=1)
```

output:

<div>

<table border="1" class="dataframe">
  <thead>
    <tr style="text-align: right;">
      <th></th>
      <th>Basic format</th>
      <th>gzip format</th>
      <th>bzip2 format</th>
    </tr>
  </thead>
  <tbody>
    <tr>
      <th>0</th>
      <td>ν•œκΈ€</td>
      <td>ν•œκΈ€</td>
      <td>ν•œκΈ€</td>
    </tr>
    <tr>
      <th>1</th>
      <td>English</td>
      <td>English</td>
      <td>English</td>
    </tr>
    <tr>
      <th>2</th>
      <td>1234</td>
      <td>1234</td>
      <td>1234</td>
    </tr>
    <tr>
      <th>3</th>
      <td>!@#$%^&amp;*()_+</td>
      <td>!@#$%^&amp;*()_+</td>
      <td>!@#$%^&amp;*()_+</td>
    </tr>
    <tr>
      <th>4</th>
      <td>πŸ˜€πŸ‘πŸ‘πŸ»πŸ‘πŸΌ</td>
      <td>πŸ˜€πŸ‘πŸ‘πŸ»πŸ‘πŸΌ</td>
      <td>πŸ˜€πŸ‘πŸ‘πŸ»πŸ‘πŸΌ</td>
    </tr>
  </tbody>
</table>
</div>

<br>

```python
# Download the saved file locally (download_file)
import os  # needed for os.getcwd() below

print(s3.download_file.__doc__, end="\n====================\n")
load_path = os.getcwd()
s3.download_file(file_name="assets/test/test_write.csv", load_path=load_path+"/test_write.csv")
s3.download_file(file_name="assets/test/test_write.csv.gz", load_path=load_path+"/test_write.csv.gz")
s3.download_file(file_name="assets/test/test_write.csv.bz2", load_path=load_path+"/test_write.csv.bz2")
```

```python
# Delete a file on s3 (delete_file)
print(s3.delete_file.__doc__, end="\n====================\n")
print(f"List of files before deletion: {s3.find_files(prefix='assets/test/')}")
s3.delete_file(file_name="assets/test/test_write.csv")
s3.delete_file(file_name="assets/test/test_write.csv.gz")
s3.delete_file(file_name="assets/test/test_write.csv.bz2")
print(f"List of files after deletion: {s3.find_files(prefix='assets/test/')}")
```

output:

```python
"List of files before deletion: ['assets/test/', 'assets/test/test.csv', 'assets/test/test.json', 'assets/test/test.parquet', 'assets/test/test.pickle', 'assets/test/test.pkl', 'assets/test/test.txt', 'assets/test/test_write.csv', 'assets/test/test_write.csv.bz2', 'assets/test/test_write.csv.gz']"
"List of files after deletion: ['assets/test/', 'assets/test/test.csv', 'assets/test/test.json', 'assets/test/test.parquet', 'assets/test/test.pickle', 'assets/test/test.pkl', 'assets/test/test.txt']"
```

<br>

```python
# Upload a file stored locally (upload_file)
print(s3.upload_file.__doc__, end="\n====================\n")
print(f"List of files before upload: {s3.find_files(prefix='assets/test/')}")
s3.upload_file(file_name="assets/test/test_write.csv", file_path=load_path+"/test_write.csv")
s3.upload_file(file_name="assets/test/test_write.csv.gz", file_path=load_path+"/test_write.csv.gz")
s3.upload_file(file_name="assets/test/test_write.csv.bz2", file_path=load_path+"/test_write.csv.bz2")
print(f"List of files after upload: {s3.find_files(prefix='assets/test/')}")
```

output:

```python
"List of files before upload: ['assets/test/', 'assets/test/test.csv', 'assets/test/test.json', 'assets/test/test.parquet', 'assets/test/test.pickle', 'assets/test/test.pkl', 'assets/test/test.txt']"
"List of files after upload: ['assets/test/', 'assets/test/test.csv', 'assets/test/test.json', 'assets/test/test.parquet', 'assets/test/test.pickle', 'assets/test/test.pkl', 'assets/test/test.txt', 'assets/test/test_write.csv', 'assets/test/test_write.csv.bz2', 'assets/test/test_write.csv.gz']"
```

```python
# Delete local files
os.remove(load_path+"/test_write.csv")
os.remove(load_path+"/test_write.csv.gz")
os.remove(load_path+"/test_write.csv.bz2")
```

---

### Methods that use CRUD in various ways

- **`read_auto`**
  - A method that executes one of `read_csv`, `read_excel`, `read_json`, `read_parquet`, and `read_pkl` depending on the file extension
  - The extension can be inferred from the file name automatically, or specified explicitly with the `extension` argument.<br><br>
- **`read_thread`**
  - Executes the `read_auto` method with multi-threading (a sketch of this pattern follows the example below)<br><br>
- **`compress`**, **`decompress`**
  - Compress and decompress files in the S3 bucket and save the results back as objects
  - Implemented with the `_read_file` and `_write_file` methods<br><br>

<div>

<table border="1" class="dataframe">
  <thead>
    <tr style="text-align: right;">
      <th></th>
      <th>test</th>
    </tr>
  </thead>
  <tbody>
    <tr>
      <th>0</th>
      <td>ν•œκΈ€</td>
    </tr>
    <tr>
      <th>1</th>
      <td>English</td>
    </tr>
    <tr>
      <th>2</th>
      <td>1234</td>
    </tr>
    <tr>
      <th>3</th>
      <td>!@#$%^&amp;*()_+</td>
    </tr>
    <tr>
      <th>4</th>
      <td>πŸ˜€πŸ‘πŸ‘πŸ»πŸ‘πŸΌ</td>
    </tr>
  </tbody>
</table>
</div>

```python
auto_path = s3.find_file(file_name="2023-04-30", str_contains=True)  # file path whose name contains 2023-04-30
print(f"File path with filename containing 2023-04-30: {auto_path}")
# Derive the folder path to use as the prefix
folder_path = auto_path[:auto_path.rfind('/')] + '/'
print(f"Folder path of the file path: {folder_path}")
print(f"Number of files in the folder: {len(s3.find_files(prefix=folder_path))}")
auto_file = s3.read_thread(prefix=folder_path, encoding="cp949", workers=os.cpu_count(), extension="csv")
print(f"Number of data frames of files in the folder (list type): {len(auto_file)}")
```

output:

```python
"File path with filename containing 2023-04-30: assets/csv/2023-04-30.csv"
"Folder path of the file path: assets/csv/"
"Number of files in the folder: 112"
"Number of data frames of files in the folder (list type): 112"
```
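
A minimal sketch of the multi-threaded pattern `read_thread` presumably uses, built on the standard library's `ThreadPoolExecutor` (an illustration under that assumption, not the package's actual implementation):

```python
import os
from concurrent.futures import ThreadPoolExecutor

def read_all_csv(s3, prefix: str, encoding: str = "utf-8"):
    """Read every CSV under `prefix` concurrently with s3namic's read_csv."""
    files = [f for f in s3.find_files(prefix=prefix) if f.endswith(".csv")]
    with ThreadPoolExecutor(max_workers=os.cpu_count()) as pool:
        return list(pool.map(lambda f: s3.read_csv(file_name=f, encoding=encoding), files))
```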

<br>

```python
s3.compress(file_name="assets/test/test_write.csv", compression="gzip")
s3.compress(file_name="assets/test/test_write.csv", compression="bz2")
s3.decompress(file_name="assets/test/test_write.csv.gz")
s3.decompress(file_name="assets/test/test_write.csv.bz2")
```

output:

```python
"The file assets/test/test_write.csv was compressed using gzip and saved as assets/test/test_write.csv.gz."
"The file assets/test/test_write.csv was compressed using bz2 and saved as assets/test/test_write.csv.bz2."
"The file assets/test/test_write.csv.gz was unzipped and saved as assets/test/test_write.csv."
"The file assets/test/test_write.csv.bz2 was unzipped and saved as assets/test/test_write.csv"
```
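
The same round trip can be reproduced with the standard-library `gzip` module; a sketch of the in-memory equivalent (assuming the objects fit in memory):

```python
import gzip

raw = b"test\n1234\n"                    # bytes of an original object
compressed = gzip.compress(raw)          # what compress() plausibly stores as the .gz object
restored = gzip.decompress(compressed)   # what decompress() recovers
assert restored == raw
```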

            
