# lawkit-python
Python wrapper for the `lawkit` CLI tool - Statistical law analysis toolkit for fraud detection and data quality assessment.
## Installation
```bash
pip install lawkit-python
```
This will automatically download the appropriate `lawkit` binary for your system from GitHub Releases.
## Quick Start
```python
import lawkit
# Analyze financial data with Benford Law
result = lawkit.analyze_benford('financial_data.csv')
print(result)
# Get structured JSON output
json_result = lawkit.analyze_benford(
    'accounting.csv',
    lawkit.LawkitOptions(output='json')
)
print(f"Risk level: {json_result.risk_level}")
print(f"P-value: {json_result.p_value}")
# Check if data follows Pareto principle (80/20 rule)
pareto_result = lawkit.analyze_pareto(
    'sales_data.csv',
    lawkit.LawkitOptions(output='json', gini_coefficient=True)
)
print(f"Gini coefficient: {pareto_result.gini_coefficient}")
print(f"80/20 concentration: {pareto_result.concentration_80_20}")
```
## Features
### Statistical Laws Supported
- **Benford Law**: Detect fraud and anomalies in numerical data
- **Pareto Principle**: Analyze 80/20 distributions and concentration
- **Zipf Law**: Analyze word frequencies and power-law distributions
- **Normal Distribution**: Test for normality and detect outliers
- **Poisson Distribution**: Analyze rare events and count data
### Advanced Analysis
- **Multi-law Comparison**: Compare multiple statistical laws on the same data
- **Outlier Detection**: Advanced anomaly detection algorithms
- **Time Series Analysis**: Trend and seasonality detection
- **International Numbers**: Support for various number formats (Japanese, Chinese, etc.)
- **Memory Efficient**: Handle large datasets with streaming analysis
### File Format Support
- **CSV, JSON, YAML, TOML, XML**: Standard structured data formats
- **Excel Files**: `.xlsx` and `.xls` support
- **PDF Documents**: Extract and analyze numerical data from PDFs
- **Word Documents**: Analyze data from `.docx` files
- **PowerPoint**: Extract data from presentations
## Usage Examples
### Modern API (Recommended)
```python
import lawkit
# Analyze with Benford Law
result = lawkit.analyze_benford('invoice_data.csv')
print(result)
# Get detailed JSON analysis
json_result = lawkit.analyze_benford(
'financial_statements.xlsx',
lawkit.LawkitOptions(
format='excel',
output='json',
confidence=0.95,
verbose=True
)
)
if json_result.risk_level == "High":
print("⚠️ High risk of fraud detected!")
print(f"Chi-square: {json_result.chi_square}")
print(f"P-value: {json_result.p_value}")
print(f"MAD: {json_result.mad}%")
# Pareto analysis for business insights
pareto_result = lawkit.analyze_pareto(
'customer_revenue.csv',
lawkit.LawkitOptions(
output='json',
gini_coefficient=True,
business_analysis=True,
percentiles="70,80,90"
)
)
print(f"Top 20% customers generate {pareto_result.concentration_80_20:.1f}% of revenue")
print(f"Income inequality (Gini): {pareto_result.gini_coefficient:.3f}")
# Normal distribution analysis with outlier detection
normal_result = lawkit.analyze_normal(
'quality_measurements.csv',
lawkit.LawkitOptions(
output='json',
outlier_detection=True,
test_type='shapiro'
)
)
if normal_result.p_value < 0.05:
print("Data does not follow normal distribution")
if normal_result.outliers:
print(f"Found {len(normal_result.outliers)} outliers")
# Multi-law analysis
analysis = lawkit.analyze_laws(
    'complex_dataset.csv',
    lawkit.LawkitOptions(output='json', laws='benf,pareto,zipf')
)
print(f"Analysis results: {analysis.data}")
print(f"Overall risk level: {analysis.risk_level}")
# Data validation
validation = lawkit.validate_laws(
    'complex_dataset.csv',
    lawkit.LawkitOptions(output='json', consistency_check=True)
)
print(f"Validation status: {validation.data}")
# Conflict diagnosis
diagnosis = lawkit.diagnose_laws(
    'complex_dataset.csv',
    lawkit.LawkitOptions(output='json', report='detailed')
)
print(f"Diagnosis: {diagnosis.data}")
```
### Generate Sample Data
```python
import lawkit
# Generate Benford Law compliant data
benford_data = lawkit.generate_data('benf', samples=1000, seed=42)
print(benford_data)
# Generate normal distribution data
normal_data = lawkit.generate_data('normal', samples=500, mean=100, stddev=15)
# Generate Pareto distribution data
pareto_data = lawkit.generate_data('pareto', samples=1000, concentration=0.8)
# Test the pipeline: generate → analyze
data = lawkit.generate_data('benf', samples=10000, seed=42)
result = lawkit.analyze_string(data, 'benf', lawkit.LawkitOptions(output='json'))
print(f"Generated data risk level: {result.risk_level}")
```
### Analyze String Data Directly
```python
import lawkit
# Analyze CSV data from string
csv_data = """amount
123.45
456.78
789.12
234.56
567.89"""
result = lawkit.analyze_string(
    csv_data,
    'benf',
    lawkit.LawkitOptions(format='csv', output='json')
)
print(f"Risk assessment: {result.risk_level}")
# Analyze JSON data
json_data = '{"values": [12, 23, 34, 45, 56, 67, 78, 89]}'
result = lawkit.analyze_string(
    json_data,
    'normal',
    lawkit.LawkitOptions(format='json', output='json')
)
print(f"Is normal: {result.p_value > 0.05}")
```
### Advanced Options
```python
import lawkit
# High-performance analysis with optimization
result = lawkit.analyze_benford(
'large_dataset.csv',
lawkit.LawkitOptions(
optimize=True,
parallel=True,
memory_efficient=True,
min_count=50,
threshold=0.001
)
)
# International number support
result = lawkit.analyze_benford(
'japanese_accounting.csv',
lawkit.LawkitOptions(
international=True,
format='csv',
output='json'
)
)
# Time series analysis
result = lawkit.analyze_normal(
'sensor_data.csv',
lawkit.LawkitOptions(
time_series=True,
outlier_detection=True,
output='json'
)
)
```
### Legacy API (Backward Compatibility)
```python
from lawkit import run_lawkit
# Direct command execution
result = run_lawkit(["benf", "data.csv", "--format", "csv", "--output", "json"])
if result.returncode == 0:
print("Analysis successful")
print(result.stdout)
else:
print("Analysis failed")
print(result.stderr)
# Legacy analysis functions
from lawkit.compat import run_benford_analysis, run_pareto_analysis
benford_result = run_benford_analysis("financial.csv", format="csv", output="json")
pareto_result = run_pareto_analysis("sales.csv", gini_coefficient=True)
```
## Installation and Setup
### Automatic Installation (Recommended)
```bash
pip install lawkit-python
```
The package will automatically download the appropriate binary for your platform.
### Manual Binary Installation
If automatic download fails:
```bash
lawkit-download-binary
```
### Development Installation
```bash
git clone https://github.com/kako-jun/lawkit
cd lawkit/lawkit-python
pip install -e ".[dev]"
```
### Verify Installation
```python
import lawkit
# Check if lawkit is available
if lawkit.is_lawkit_available():
print("✅ lawkit is installed and working")
print(f"Version: {lawkit.get_version()}")
else:
print("❌ lawkit is not available")
# Run self-test
if lawkit.selftest():
print("✅ All tests passed")
else:
print("❌ Self-test failed")
```
## Use Cases
### Financial Fraud Detection
```python
import lawkit
# Analyze invoice amounts for fraud
result = lawkit.analyze_benford('invoices.csv',
lawkit.LawkitOptions(output='json'))
if result.risk_level in ['High', 'Critical']:
print("🚨 Potential fraud detected in invoice data")
print(f"Statistical significance: p={result.p_value:.6f}")
print(f"Deviation from Benford Law: {result.mad:.2f}%")
```
### Business Intelligence
```python
import lawkit
# Analyze customer revenue distribution
result = lawkit.analyze_pareto('customer_revenue.csv',
lawkit.LawkitOptions(
output='json',
business_analysis=True,
gini_coefficient=True
))
print(f"Revenue concentration: {result.concentration_80_20:.1f}%")
print(f"Market inequality: {result.gini_coefficient:.3f}")
```
### Quality Control
```python
import lawkit
# Analyze manufacturing measurements
result = lawkit.analyze_normal('measurements.csv',
lawkit.LawkitOptions(
output='json',
outlier_detection=True,
test_type='shapiro'
))
if result.p_value < 0.05:
print("⚠️ Process out of control - not following normal distribution")
if result.outliers:
print(f"Found {len(result.outliers)} outlying measurements")
```
### Text Analysis
```python
import lawkit
# Analyze word frequency in documents
result = lawkit.analyze_zipf('document.txt',
lawkit.LawkitOptions(output='json'))
print(f"Text follows Zipf Law: {result.p_value > 0.05}")
print(f"Power law exponent: {result.exponent:.3f}")
```
## API Reference
### Main Functions
- `analyze_benford(input_data, options)` - Benford Law analysis
- `analyze_pareto(input_data, options)` - Pareto principle analysis
- `analyze_zipf(input_data, options)` - Zipf Law analysis
- `analyze_normal(input_data, options)` - Normal distribution analysis
- `analyze_poisson(input_data, options)` - Poisson distribution analysis
- `analyze_laws(input_data, options)` - Multi-law analysis
- `validate_laws(input_data, options)` - Data validation and consistency check
- `diagnose_laws(input_data, options)` - Conflict diagnosis and detailed reporting
- `compare_laws(input_data, options)` - Alias for analyze_laws (backward compatibility)
- `generate_data(law_type, samples, **kwargs)` - Generate sample data
- `analyze_string(content, law_type, options)` - Analyze string data directly
### Utility Functions
- `is_lawkit_available()` - Check if lawkit CLI is available
- `get_version()` - Get lawkit version
- `selftest()` - Run self-test
### Classes
- `LawkitOptions` - Configuration options for analysis
- `LawkitResult` - Analysis results with structured access
- `LawkitError` - Exception class for lawkit errors
## Platform Support
- **Windows**: x86_64
- **macOS**: x86_64, ARM64 (Apple Silicon)
- **Linux**: x86_64, ARM64
## Requirements
- Python 3.8+
- No additional dependencies required
## License
This project is licensed under the MIT License.
## Support
- GitHub Issues: https://github.com/kako-jun/lawkit/issues
- Documentation: https://github.com/kako-jun/lawkit/tree/main/docs
- Examples: https://github.com/kako-jun/lawkit/blob/main/docs/user-guide/examples.md
## Contributing
Contributions are welcome! Please read the [Contributing Guide](https://github.com/kako-jun/lawkit/blob/main/CONTRIBUTING.md) for details.
Raw data
{
"_id": null,
"home_page": null,
"name": "lawkit-python",
"maintainer": null,
"docs_url": null,
"requires_python": ">=3.8",
"maintainer_email": null,
"keywords": "anomaly-detection, audit, benford, compliance, data-quality, forensic-accounting, fraud-detection, normal, outlier-detection, pareto, poisson, statistical-analysis, statistics, zipf",
"author": null,
"author_email": "kako-jun <kako.jun.42@gmail.com>",
"download_url": "https://files.pythonhosted.org/packages/b4/60/7cf3f353cc6b5be0c0a39f6bb83770d4d035babd586d05ad9bad7015d4ce/lawkit_python-2.1.2.tar.gz",
"platform": null,
"description": "# lawkit-python\n\nPython wrapper for the `lawkit` CLI tool - Statistical law analysis toolkit for fraud detection and data quality assessment.\n\n## Installation\n\n```bash\npip install lawkit-python\n```\n\nThis will automatically download the appropriate `lawkit` binary for your system from GitHub Releases.\n\n## Quick Start\n\n```python\nimport lawkit\n\n# Analyze financial data with Benford Law\nresult = lawkit.analyze_benford('financial_data.csv')\nprint(result)\n\n# Get structured JSON output\njson_result = lawkit.analyze_benford(\n 'accounting.csv',\n lawkit.LawkitOptions(format='json')\n)\nprint(f\"Risk level: {json_result.risk_level}\")\nprint(f\"P-value: {json_result.p_value}\")\n\n# Check if data follows Pareto principle (80/20 rule)\npareto_result = lawkit.analyze_pareto(\n 'sales_data.csv',\n lawkit.LawkitOptions(format='json', gini_coefficient=True)\n)\nprint(f\"Gini coefficient: {pareto_result.gini_coefficient}\")\nprint(f\"80/20 concentration: {pareto_result.concentration_80_20}\")\n```\n\n## Features\n\n### Statistical Laws Supported\n\n- **Benford Law**: Detect fraud and anomalies in numerical data\n- **Pareto Principle**: Analyze 80/20 distributions and concentration\n- **Zipf Law**: Analyze word frequencies and power-law distributions\n- **Normal Distribution**: Test for normality and detect outliers\n- **Poisson Distribution**: Analyze rare events and count data\n\n### Advanced Analysis\n\n- **Multi-law Comparison**: Compare multiple statistical laws on the same data\n- **Outlier Detection**: Advanced anomaly detection algorithms\n- **Time Series Analysis**: Trend and seasonality detection\n- **International Numbers**: Support for various number formats (Japanese, Chinese, etc.)\n- **Memory Efficient**: Handle large datasets with streaming analysis\n\n### File Format Support\n\n- **CSV, JSON, YAML, TOML, XML**: Standard structured data formats\n- **Excel Files**: `.xlsx` and `.xls` support\n- **PDF Documents**: Extract and 
analyze numerical data from PDFs\n- **Word Documents**: Analyze data from `.docx` files\n- **PowerPoint**: Extract data from presentations\n\n## Usage Examples\n\n### Modern API (Recommended)\n\n```python\nimport lawkit\n\n# Analyze with Benford Law\nresult = lawkit.analyze_benford('invoice_data.csv')\nprint(result)\n\n# Get detailed JSON analysis\njson_result = lawkit.analyze_benford(\n 'financial_statements.xlsx',\n lawkit.LawkitOptions(\n format='excel',\n output='json',\n confidence=0.95,\n verbose=True\n )\n)\n\nif json_result.risk_level == \"High\":\n print(\"\u26a0\ufe0f High risk of fraud detected!\")\n print(f\"Chi-square: {json_result.chi_square}\")\n print(f\"P-value: {json_result.p_value}\")\n print(f\"MAD: {json_result.mad}%\")\n\n# Pareto analysis for business insights\npareto_result = lawkit.analyze_pareto(\n 'customer_revenue.csv',\n lawkit.LawkitOptions(\n output='json',\n gini_coefficient=True,\n business_analysis=True,\n percentiles=\"70,80,90\"\n )\n)\n\nprint(f\"Top 20% customers generate {pareto_result.concentration_80_20:.1f}% of revenue\")\nprint(f\"Income inequality (Gini): {pareto_result.gini_coefficient:.3f}\")\n\n# Normal distribution analysis with outlier detection\nnormal_result = lawkit.analyze_normal(\n 'quality_measurements.csv',\n lawkit.LawkitOptions(\n output='json',\n outlier_detection=True,\n test_type='shapiro'\n )\n)\n\nif normal_result.p_value < 0.05:\n print(\"Data does not follow normal distribution\")\n if normal_result.outliers:\n print(f\"Found {len(normal_result.outliers)} outliers\")\n\n# Multi-law analysis\nanalysis = lawkit.analyze_laws(\n 'complex_dataset.csv',\n lawkit.LawkitOptions(format='json', laws='benf,pareto,zipf')\n)\nprint(f\"Analysis results: {analysis.data}\")\nprint(f\"Overall risk level: {analysis.risk_level}\")\n\n# Data validation\nvalidation = lawkit.validate_laws(\n 'complex_dataset.csv',\n lawkit.LawkitOptions(format='json', consistency_check=True)\n)\nprint(f\"Validation status: 
{validation.data}\")\n\n# Conflict diagnosis\ndiagnosis = lawkit.diagnose_laws(\n 'complex_dataset.csv',\n lawkit.LawkitOptions(format='json', report='detailed')\n)\nprint(f\"Diagnosis: {diagnosis.data}\")\n```\n\n### Generate Sample Data\n\n```python\nimport lawkit\n\n# Generate Benford Law compliant data\nbenford_data = lawkit.generate_data('benf', samples=1000, seed=42)\nprint(benford_data)\n\n# Generate normal distribution data\nnormal_data = lawkit.generate_data('normal', samples=500, mean=100, stddev=15)\n\n# Generate Pareto distribution data\npareto_data = lawkit.generate_data('pareto', samples=1000, concentration=0.8)\n\n# Test the pipeline: generate \u2192 analyze\ndata = lawkit.generate_data('benf', samples=10000, seed=42)\nresult = lawkit.analyze_string(data, 'benf', lawkit.LawkitOptions(output='json'))\nprint(f\"Generated data risk level: {result.risk_level}\")\n```\n\n### Analyze String Data Directly\n\n```python\nimport lawkit\n\n# Analyze CSV data from string\ncsv_data = \"\"\"amount\n123.45\n456.78\n789.12\n234.56\n567.89\"\"\"\n\nresult = lawkit.analyze_string(\n csv_data,\n 'benf',\n lawkit.LawkitOptions(format='json')\n)\nprint(f\"Risk assessment: {result.risk_level}\")\n\n# Analyze JSON data\njson_data = '{\"values\": [12, 23, 34, 45, 56, 67, 78, 89]}'\nresult = lawkit.analyze_string(\n json_data,\n 'normal',\n lawkit.LawkitOptions(format='json')\n)\nprint(f\"Is normal: {result.p_value > 0.05}\")\n```\n\n### Advanced Options\n\n```python\nimport lawkit\n\n# High-performance analysis with optimization\nresult = lawkit.analyze_benford(\n 'large_dataset.csv',\n lawkit.LawkitOptions(\n optimize=True,\n parallel=True,\n memory_efficient=True,\n min_count=50,\n threshold=0.001\n )\n)\n\n# International number support\nresult = lawkit.analyze_benford(\n 'japanese_accounting.csv',\n lawkit.LawkitOptions(\n international=True,\n format='csv',\n output='json'\n )\n)\n\n# Time series analysis\nresult = lawkit.analyze_normal(\n 'sensor_data.csv',\n 
lawkit.LawkitOptions(\n time_series=True,\n outlier_detection=True,\n output='json'\n )\n)\n```\n\n### Legacy API (Backward Compatibility)\n\n```python\nfrom lawkit import run_lawkit\n\n# Direct command execution\nresult = run_lawkit([\"benf\", \"data.csv\", \"--format\", \"csv\", \"--output\", \"json\"])\n\nif result.returncode == 0:\n print(\"Analysis successful\")\n print(result.stdout)\nelse:\n print(\"Analysis failed\")\n print(result.stderr)\n\n# Legacy analysis functions\nfrom lawkit.compat import run_benford_analysis, run_pareto_analysis\n\nbenford_result = run_benford_analysis(\"financial.csv\", format=\"csv\", output=\"json\")\npareto_result = run_pareto_analysis(\"sales.csv\", gini_coefficient=True)\n```\n\n## Installation and Setup\n\n### Automatic Installation (Recommended)\n\n```bash\npip install lawkit-python\n```\n\nThe package will automatically download the appropriate binary for your platform.\n\n### Manual Binary Installation\n\nIf automatic download fails:\n\n```bash\nlawkit-download-binary\n```\n\n### Development Installation\n\n```bash\ngit clone https://github.com/kako-jun/lawkit\ncd lawkit/lawkit-python\npip install -e .[dev]\n```\n\n### Verify Installation\n\n```python\nimport lawkit\n\n# Check if lawkit is available\nif lawkit.is_lawkit_available():\n print(\"\u2705 lawkit is installed and working\")\n print(f\"Version: {lawkit.get_version()}\")\nelse:\n print(\"\u274c lawkit is not available\")\n\n# Run self-test\nif lawkit.selftest():\n print(\"\u2705 All tests passed\")\nelse:\n print(\"\u274c Self-test failed\")\n```\n\n## Use Cases\n\n### Financial Fraud Detection\n\n```python\nimport lawkit\n\n# Analyze invoice amounts for fraud\nresult = lawkit.analyze_benford('invoices.csv', \n lawkit.LawkitOptions(output='json'))\n\nif result.risk_level in ['High', 'Critical']:\n print(\"\ud83d\udea8 Potential fraud detected in invoice data\")\n print(f\"Statistical significance: p={result.p_value:.6f}\")\n print(f\"Deviation from Benford Law: 
{result.mad:.2f}%\")\n```\n\n### Business Intelligence\n\n```python\nimport lawkit\n\n# Analyze customer revenue distribution\nresult = lawkit.analyze_pareto('customer_revenue.csv',\n lawkit.LawkitOptions(\n output='json',\n business_analysis=True,\n gini_coefficient=True\n ))\n\nprint(f\"Revenue concentration: {result.concentration_80_20:.1f}%\")\nprint(f\"Market inequality: {result.gini_coefficient:.3f}\")\n```\n\n### Quality Control\n\n```python\nimport lawkit\n\n# Analyze manufacturing measurements\nresult = lawkit.analyze_normal('measurements.csv',\n lawkit.LawkitOptions(\n output='json',\n outlier_detection=True,\n test_type='shapiro'\n ))\n\nif result.p_value < 0.05:\n print(\"\u26a0\ufe0f Process out of control - not following normal distribution\")\n if result.outliers:\n print(f\"Found {len(result.outliers)} outlying measurements\")\n```\n\n### Text Analysis\n\n```python\nimport lawkit\n\n# Analyze word frequency in documents\nresult = lawkit.analyze_zipf('document.txt',\n lawkit.LawkitOptions(output='json'))\n\nprint(f\"Text follows Zipf Law: {result.p_value > 0.05}\")\nprint(f\"Power law exponent: {result.exponent:.3f}\")\n```\n\n## API Reference\n\n### Main Functions\n\n- `analyze_benford(input_data, options)` - Benford Law analysis\n- `analyze_pareto(input_data, options)` - Pareto principle analysis \n- `analyze_zipf(input_data, options)` - Zipf Law analysis\n- `analyze_normal(input_data, options)` - Normal distribution analysis\n- `analyze_poisson(input_data, options)` - Poisson distribution analysis\n- `analyze_laws(input_data, options)` - Multi-law analysis\n- `validate_laws(input_data, options)` - Data validation and consistency check\n- `diagnose_laws(input_data, options)` - Conflict diagnosis and detailed reporting\n- `compare_laws(input_data, options)` - Alias for analyze_laws (backward compatibility)\n- `generate_data(law_type, samples, **kwargs)` - Generate sample data\n- `analyze_string(content, law_type, options)` - Analyze string data 
directly\n\n### Utility Functions\n\n- `is_lawkit_available()` - Check if lawkit CLI is available\n- `get_version()` - Get lawkit version\n- `selftest()` - Run self-test\n\n### Classes\n\n- `LawkitOptions` - Configuration options for analysis\n- `LawkitResult` - Analysis results with structured access\n- `LawkitError` - Exception class for lawkit errors\n\n## Platform Support\n\n- **Windows**: x86_64\n- **macOS**: x86_64, ARM64 (Apple Silicon)\n- **Linux**: x86_64, ARM64\n\n## Requirements\n\n- Python 3.8+\n- No additional dependencies required\n\n## License\n\nThis project is licensed under the MIT License.\n\n## Support\n\n- GitHub Issues: https://github.com/kako-jun/lawkit/issues\n- Documentation: https://github.com/kako-jun/lawkit/tree/main/docs\n- Examples: https://github.com/kako-jun/lawkit/tree/main/docs/user-guide/examples.md\n\n## Contributing\n\nContributions are welcome! Please read the [Contributing Guide](https://github.com/kako-jun/lawkit/blob/main/CONTRIBUTING.md) for details.",
"bugtrack_url": null,
"license": null,
"summary": "Python wrapper for lawkit - Statistical law analysis toolkit for fraud detection and data quality assessment",
"version": "2.1.2",
"project_urls": {
"Documentation": "https://github.com/kako-jun/lawkit/tree/main/docs",
"Homepage": "https://github.com/kako-jun/lawkit",
"Issues": "https://github.com/kako-jun/lawkit/issues",
"Repository": "https://github.com/kako-jun/lawkit"
},
"split_keywords": [
"anomaly-detection",
" audit",
" benford",
" compliance",
" data-quality",
" forensic-accounting",
" fraud-detection",
" normal",
" outlier-detection",
" pareto",
" poisson",
" statistical-analysis",
" statistics",
" zipf"
],
"urls": [
{
"comment_text": null,
"digests": {
"blake2b_256": "063cc572f1947aa492de3d2c0b1dfd11c099dc182100a2665fc4b686e231ea27",
"md5": "8e877300c126afe5d0db400bb7afc1c1",
"sha256": "13f9ddcc07740bb3a3e9d88e6e0c9f31b4e3d810873c6c8a73f24498e982b816"
},
"downloads": -1,
"filename": "lawkit_python-2.1.2-py3-none-any.whl",
"has_sig": false,
"md5_digest": "8e877300c126afe5d0db400bb7afc1c1",
"packagetype": "bdist_wheel",
"python_version": "py3",
"requires_python": ">=3.8",
"size": 13558,
"upload_time": "2025-07-09T13:31:33",
"upload_time_iso_8601": "2025-07-09T13:31:33.609193Z",
"url": "https://files.pythonhosted.org/packages/06/3c/c572f1947aa492de3d2c0b1dfd11c099dc182100a2665fc4b686e231ea27/lawkit_python-2.1.2-py3-none-any.whl",
"yanked": false,
"yanked_reason": null
},
{
"comment_text": null,
"digests": {
"blake2b_256": "b4607cf3f353cc6b5be0c0a39f6bb83770d4d035babd586d05ad9bad7015d4ce",
"md5": "77cf67d9781998e061a2083e6731e537",
"sha256": "8f0f901ea040c72109576a4154dec97442d68a370e7469f84c040b574b3d1f5e"
},
"downloads": -1,
"filename": "lawkit_python-2.1.2.tar.gz",
"has_sig": false,
"md5_digest": "77cf67d9781998e061a2083e6731e537",
"packagetype": "sdist",
"python_version": "source",
"requires_python": ">=3.8",
"size": 12142,
"upload_time": "2025-07-09T13:31:34",
"upload_time_iso_8601": "2025-07-09T13:31:34.754536Z",
"url": "https://files.pythonhosted.org/packages/b4/60/7cf3f353cc6b5be0c0a39f6bb83770d4d035babd586d05ad9bad7015d4ce/lawkit_python-2.1.2.tar.gz",
"yanked": false,
"yanked_reason": null
}
],
"upload_time": "2025-07-09 13:31:34",
"github": true,
"gitlab": false,
"bitbucket": false,
"codeberg": false,
"github_user": "kako-jun",
"github_project": "lawkit",
"travis_ci": false,
"coveralls": false,
"github_actions": true,
"lcname": "lawkit-python"
}