# 🚀 UnrealOn v2.0
**The easiest way to build production-ready web scrapers in Python.**
[![PyPI version](https://badge.fury.io/py/unrealon.svg)](https://badge.fury.io/py/unrealon)
[![Python 3.10+](https://img.shields.io/badge/python-3.10+-blue.svg)](https://www.python.org/downloads/)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
## 🎯 Why UnrealOn?
**Stop fighting infrastructure. Start building parsers.**
```python
# Just focus on YOUR parsing logic
class MyParser:
    def __init__(self, driver):
        self.driver = driver

    async def parse_products(self, url: str):
        html = await self.driver.http.get_html(url)  # Auto proxy, retries, etc.
        soup = BeautifulSoup(html, 'html.parser')

        products = []
        for item in soup.select('.product'):
            products.append({
                'title': item.select_one('.title').text,
                'price': item.select_one('.price').text
            })

        return products  # Auto-saved, logged, monitored

# Everything else is handled automatically!
```
**What you get for free:**
- ✅ **HTTP Client** with proxy rotation, retries, rate limiting
- ✅ **Browser Automation** with stealth mode and anti-detection
- ✅ **Error Handling** with automatic retries and graceful failures
- ✅ **Logging & Monitoring** with structured logs and performance metrics
- ✅ **CLI Interface** auto-generated from your parser methods
- ✅ **Configuration** with YAML files and validation
- ✅ **Production Deployment** with RPC server integration
---
## 📦 Installation
```bash
pip install unrealon
```
That's it! No complex setup, no configuration files to write.
---
## 🚀 Quick Start
### 1. Create Your Parser
```python
# my_parser.py
import asyncio

from bs4 import BeautifulSoup
from unrealon_driver import UniversalDriver, DriverConfig


class MyWebsiteParser:
    def __init__(self, driver: UniversalDriver):
        self.driver = driver

    async def parse_products(self, search_query: str):
        """Parse products from search results."""
        # UnrealOn handles all the HTTP complexity
        url = f"https://example.com/search?q={search_query}"
        html = await self.driver.http.get_html(url)

        # Focus on YOUR parsing logic
        soup = BeautifulSoup(html, 'html.parser')
        products = []

        for item in soup.select('.product-item'):
            product = {
                'title': item.select_one('.title').text.strip(),
                'price': item.select_one('.price').text.strip(),
                'url': item.select_one('a')['href']
            }
            products.append(product)

        # Results are automatically saved and logged
        await self.driver.logger.info(f"Found {len(products)} products")
        return products


# Setup (one time)
config = DriverConfig.for_development("my_parser")
driver = UniversalDriver(config)
parser = MyWebsiteParser(driver)


# Use it
async def main():
    await driver.initialize()
    results = await parser.parse_products("laptop")
    print(f"Found {len(results)} products!")


if __name__ == '__main__':
    asyncio.run(main())
```
### 2. Add CLI Interface (Optional)
```python
# cli.py
import asyncio

import click

from my_parser import MyWebsiteParser, driver


@click.command()
@click.option('--query', required=True, help='Search query')
@click.option('--limit', default=10, help='Max results')
def search(query: str, limit: int):
    """Search for products."""

    async def run():
        await driver.initialize()
        parser = MyWebsiteParser(driver)
        results = await parser.parse_products(query)

        for i, product in enumerate(results[:limit], 1):
            print(f"{i}. {product['title']} - {product['price']}")

        await driver.shutdown()

    asyncio.run(run())


if __name__ == '__main__':
    search()
```
```bash
# Now you have a CLI!
python cli.py --query "laptop" --limit 5
```
### 3. Production Deployment (Optional)
```python
# For production, just add RPC task decorators
class ProductionParser(UniversalDriver):
    def __init__(self):
        super().__init__(DriverConfig.for_production("my_parser"))
        self.parser = MyWebsiteParser(self)

        @self.task("parse_products")  # Auto-registered RPC task
        async def parse_products_task(self, task_data):
            query = task_data.parameters['query']
            results = await self.parser.parse_products(query)

            return TaskResultData(
                task_id=task_data.task_id,
                status="completed",
                result=results
            )

# Deploy and scale automatically!
```
---
## 🎨 Features
### 🔧 HTTP Client (Built-in)
```python
# All of this is handled automatically:
html = await driver.http.get_html(url)
# ✅ Proxy rotation
# ✅ User-Agent rotation
# ✅ Automatic retries
# ✅ Rate limiting
# ✅ Cookie management
# ✅ Session persistence
```
### 🌐 Browser Automation (Built-in)
```python
# When you need a real browser:
page = await driver.browser.get_page(url)
await page.click('.load-more')
html = await page.content()
# ✅ Stealth mode
# ✅ Anti-detection
# ✅ JavaScript execution
# ✅ Screenshot capture
```
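The `page` calls above mirror the Playwright async API. Assuming the same interface (a sketch, not verified against UnrealOn's docs), screenshot capture is one extra call:

```python
# Sketch: assumes a Playwright-style page object, as the calls above suggest
page = await driver.browser.get_page(url)
await page.wait_for_selector('.load-more')  # wait for dynamic content to render
await page.screenshot(path='page.png')      # save a screenshot for debugging
html = await page.content()
```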
### 📊 Monitoring & Logging (Built-in)
```python
# Structured logging that just works:
await driver.logger.info("Starting parse", extra={
    'url': url,
    'products_found': len(products)
})
# ✅ Centralized logging
# ✅ Performance metrics
# ✅ Error tracking
# ✅ Real-time monitoring
```
### ⚙️ Configuration (Built-in)
```yaml
# config.yaml - Simple YAML configuration
parser:
  name: "My Parser"
  max_pages: 10

http:
  request_delay: 1.0
  max_retries: 3

output:
  format: json
  directory: ./results
```
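UnrealOn's own loader isn't shown here, but if you want a feel for how a file like this maps onto typed settings, here is a minimal sketch with PyYAML and Pydantic; `ParserSettings` and `HttpSettings` are illustrative names, not UnrealOn classes:

```python
# Illustrative only: UnrealOn's real config loading may differ.
import yaml
from pydantic import BaseModel


class ParserSettings(BaseModel):
    name: str
    max_pages: int = 10


class HttpSettings(BaseModel):
    request_delay: float = 1.0
    max_retries: int = 3


with open("config.yaml") as f:
    raw = yaml.safe_load(f)

parser_settings = ParserSettings(**raw["parser"])
http_settings = HttpSettings(**raw["http"])
print(parser_settings.name, http_settings.max_retries)  # "My Parser" 3
```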
### 🚀 Production Scaling (Built-in)
```python
# Scale to multiple instances automatically
@driver.task("parse_category")
async def parse_category_task(self, task_data):
    # This runs on any available parser instance
    category = task_data.parameters['category']
    return await self.parse_category(category)
# Deploy multiple instances, they auto-coordinate!
```
---
## 📚 Examples
### E-commerce Parser
```python
class EcommerceParser:
    async def parse_product(self, product_url: str):
        html = await self.driver.http.get_html(product_url)
        soup = BeautifulSoup(html, 'html.parser')

        return {
            'title': soup.select_one('h1').text,
            'price': soup.select_one('.price').text,
            'description': soup.select_one('.description').text,
            'images': [img['src'] for img in soup.select('.gallery img')]
        }
```
### News Scraper
```python
class NewsParser:
    async def parse_articles(self, category: str):
        url = f"https://news-site.com/{category}"
        html = await self.driver.http.get_html(url)
        soup = BeautifulSoup(html, 'html.parser')

        articles = []
        for article in soup.select('.article'):
            articles.append({
                'headline': article.select_one('.headline').text,
                'summary': article.select_one('.summary').text,
                'published': article.select_one('.date').text,
                'url': article.select_one('a')['href']
            })

        return articles
```
### Real Estate Listings
```python
class RealEstateParser:
    async def parse_listings(self, city: str, max_price: int):
        url = f"https://realestate.com/search?city={city}&max_price={max_price}"

        # Use browser for JavaScript-heavy sites
        page = await self.driver.browser.get_page(url)
        await page.wait_for_selector('.listing')

        listings = await page.evaluate('''
            () => Array.from(document.querySelectorAll('.listing')).map(listing => ({
                address: listing.querySelector('.address').textContent,
                price: listing.querySelector('.price').textContent,
                bedrooms: listing.querySelector('.bedrooms').textContent,
                url: listing.querySelector('a').href
            }))
        ''')

        return listings
```
---
## 🆚 Why Not Scrapy/BeautifulSoup/Selenium?
| Feature | UnrealOn | Scrapy | BeautifulSoup + Requests | Selenium |
|---------|----------|--------|-------------------------|----------|
| **Setup Time** | ✅ 5 minutes | ❌ Hours | ❌ Hours | ❌ Hours |
| **Proxy Rotation** | ✅ Built-in | ❌ Manual setup | ❌ Manual setup | ❌ Manual setup |
| **Anti-Detection** | ✅ Built-in | ❌ Manual setup | ❌ Manual setup | ❌ Partial |
| **Error Handling** | ✅ Built-in | ❌ Manual setup | ❌ Manual setup | ❌ Manual setup |
| **Monitoring** | ✅ Built-in | ❌ Manual setup | ❌ Manual setup | ❌ Manual setup |
| **CLI Generation** | ✅ Automatic | ❌ Manual | ❌ Manual | ❌ Manual |
| **Production Deploy** | ✅ Built-in | ❌ Complex | ❌ Very complex | ❌ Very complex |
| **Learning Curve** | ✅ Minimal | ❌ Steep | ❌ Medium | ❌ Steep |
**UnrealOn = All the power, none of the setup.**
---
## 🛠️ Advanced Features
### Type-Safe Data Models
```python
from pydantic import BaseModel
from typing import Optional
class Product(BaseModel):
    title: str
    price: Optional[float] = None
    url: str
    in_stock: bool = True

# Automatic validation and serialization
product = Product(title="Laptop", price=999.99, url="https://...")
```
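Because `Product` is a plain Pydantic model, malformed input fails loudly and serialization is a single call (Pydantic v2 API shown):

```python
from pydantic import ValidationError

# Serialize to JSON
print(product.model_dump_json())

# Validation rejects malformed input
try:
    Product(title="Laptop", price="cheap", url="https://...")
except ValidationError as exc:
    print(exc)  # reports that 'price' is not a valid number
```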
### Scheduled Tasks
```python
@driver.schedule("0 */6 * * *") # Every 6 hours
async def monitor_prices():
"""Monitor price changes automatically."""
products = await parser.parse_products("laptop")
# Check for price drops, send alerts, etc.
```
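The cron string `0 */6 * * *` means "at minute 0 of every sixth hour". Outside the UnrealOn scheduler, a plain asyncio loop gives roughly the same cadence (a sketch, not the library's mechanism; it sleeps a fixed interval rather than aligning to clock hours):

```python
import asyncio

async def monitor_prices_forever():
    while True:
        products = await parser.parse_products("laptop")
        # Check for price drops, send alerts, etc.
        await asyncio.sleep(6 * 60 * 60)  # wait six hours before the next run
```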
### Batch Processing
```python
async def parse_multiple_categories():
    categories = ["electronics", "books", "clothing"]

    # Process all categories concurrently
    tasks = [parser.parse_category(cat) for cat in categories]
    results = await driver.threads.submit_batch(tasks, max_workers=3)

    return results
```
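`driver.threads.submit_batch` is UnrealOn-specific. If all you need is bounded concurrency over async calls, the standard library covers it with `asyncio.gather` plus a semaphore (illustrative equivalent):

```python
import asyncio

async def parse_multiple_categories_stdlib():
    categories = ["electronics", "books", "clothing"]
    semaphore = asyncio.Semaphore(3)  # cap concurrency, like max_workers=3

    async def bounded(category):
        async with semaphore:
            return await parser.parse_category(category)

    # Run all categories concurrently, at most three at a time
    return await asyncio.gather(*(bounded(c) for c in categories))
```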
---
## 🚀 Getting Started
1. **Install**: `pip install unrealon`
2. **Create parser**: Write your parsing logic (focus on the scraping, not infrastructure)
3. **Run**: `python my_parser.py`
4. **Scale**: Add `@driver.task` decorators for production
### Complete Example Repository
- **[Amazon Parser](https://github.com/markolofsen/unrealon-parser-amazon)** - Production-ready Amazon scraper
---
## 📚 Documentation
- **[GitHub Repository](https://github.com/markolofsen/unrealon-parser-amazon)** - Source code and examples
- **[API Documentation](https://unrealon.com)** - Full API reference
---
## 🎉 Success Stories
### 🚗 CarAPIs - Automotive Data Platform
**[carapis.com](https://carapis.com)** - Vehicle listings from 50+ dealerships
*"Went from prototype to production in 2 days with UnrealOn"*
### 🛒 ShopAPIs - E-commerce Intelligence
**[shopapis.com](https://shopapis.com)** - Price monitoring across 100+ stores
*"Handles 1M+ products daily with zero maintenance"*
### 📊 StockAPIs - Financial Data Platform
**[stockapis.com](https://stockapis.com)** - Real-time market data collection
*"Rock-solid reliability for financial data that can't afford downtime"*
---
## 📄 License
MIT License - Use it however you want!
---
**Stop building infrastructure. Start building parsers.** 🚀