# LangChain Parquet Logger
High-performance logging for LangChain - save all your LLM interactions to Parquet files for analysis.
## Quick Start (2 minutes)
### Install
```bash
pip install langchain-callback-parquet-logger
# With S3 support
pip install "langchain-callback-parquet-logger[s3]"
```
### Basic Usage
```python
from langchain_callback_parquet_logger import ParquetLogger
from langchain_openai import ChatOpenAI
# Add logger to any LangChain LLM
logger = ParquetLogger("./logs")
llm = ChatOpenAI(callbacks=[logger])
response = llm.invoke("What is 2+2?")
# Your logs are automatically saved to ./logs/
```
### Batch Processing
```python
import pandas as pd
from langchain_callback_parquet_logger import batch_process
# Your data
df = pd.DataFrame({
'prompt': ['What is AI?', 'Explain quantum computing']
})
# Process it (logs are saved automatically). batch_process is async,
# so await it from an async context (e.g., a notebook or asyncio.run)
results = await batch_process(df)
```
That's it! Your logs are in Parquet format, ready for analysis.
## Core Features
### 1. Custom Tracking IDs
Track specific requests with custom IDs and descriptions:
```python
from langchain_callback_parquet_logger import ParquetLogger, with_tags
from langchain_openai import ChatOpenAI
logger = ParquetLogger("./logs")
llm = ChatOpenAI(callbacks=[logger])
# Add custom ID with description to track this specific request
response = llm.invoke(
"What is quantum computing?",
config=with_tags(
custom_id="user-123-session-456",
custom_id_description="User session from mobile app"
)
)
```
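The ID lands in the `custom_id` column of the log files (see the schema below), so filtering for one session later is straightforward. A minimal sketch, assuming logs were written to `./logs`:

```python
import pandas as pd

# Load the logs and pull out every event for one tracked session
df = pd.read_parquet("./logs")
session = df[df["custom_id"] == "user-123-session-456"]
```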
### 2. Batch Processing (Simple)
```python
import pandas as pd
from langchain_openai import ChatOpenAI
from langchain_callback_parquet_logger import batch_process, with_tags, LLMConfig
# Prepare your data
df = pd.DataFrame({
'prompt': ['What is AI?', 'Explain DNA'],
'config': [
with_tags(custom_id='q1', custom_id_description='Science FAQ'),
with_tags(custom_id='q2', custom_id_description='Science FAQ')
]
})
# Process with automatic logging
results = await batch_process(
df,
llm_config=LLMConfig(
llm_class=ChatOpenAI,
llm_kwargs={'model': 'gpt-4', 'temperature': 0.7}
)
)
```
### 3. Batch Processing (Full Configuration)
```python
import pandas as pd
from langchain_openai import ChatOpenAI
from langchain_callback_parquet_logger import (
batch_process,
with_tags,
LLMConfig,
JobConfig,
StorageConfig,
ProcessingConfig,
ColumnConfig,
S3Config
)
# Prepare your data with custom column names
df = pd.DataFrame({
'question': ['What is AI?', 'Explain DNA', 'What is quantum computing?'],
'user_id': ['user1', 'user2', 'user3'],
    'tool_list': [[tool1, tool2], None, [tool3]]  # Optional per-row tools; tool1, tool2, tool3 are LangChain tool objects defined elsewhere
})
# Add config for each row (required)
df['run_config'] = df['user_id'].apply(lambda x: with_tags(
custom_id=x,
tags=['production', 'v2']
))
# Process with ALL configuration options
results = await batch_process(
df,
# LLM configuration
llm_config=LLMConfig(
llm_class=ChatOpenAI,
llm_kwargs={'model': 'gpt-4', 'temperature': 0.7},
model_kwargs={'top_p': 0.9}, # Additional model parameters
structured_output=None # or Pydantic model for structured responses
),
# Job metadata configuration (all fields except category are optional)
job_config=JobConfig(
category="research",
subcategory="science", # Optional, defaults to None
description="Analyzing scientific questions", # Optional
version="2.0.0", # Optional
environment="production", # Optional
metadata={"team": "data-science", "priority": "high"} # Optional
),
# Storage configuration
storage_config=StorageConfig(
output_dir="./batch_logs",
path_template="{job_category}/{date}/{job_subcategory}/v{job_version_safe}", # Custom path structure with version
s3_config=S3Config(
bucket="my-llm-logs",
prefix="langchain-logs/",
on_failure="continue", # or "error" to fail on S3 errors
retry_attempts=3
)
),
# Processing configuration
processing_config=ProcessingConfig(
max_concurrency=100, # Parallel requests
buffer_size=1000, # Logger buffer size
show_progress=True, # Progress bar with real-time updates
return_exceptions=True, # Don't fail on single errors
return_results=True, # Set False for huge datasets to save memory
event_types=['llm_start', 'llm_end', 'llm_error'], # Events to log
partition_on="date" # Partition strategy
),
# Column name configuration (if not using defaults)
column_config=ColumnConfig(
prompt="question", # Your prompt column name
config="run_config", # Your config column name
tools="tool_list" # Your tools column name (optional)
)
)
# Results are returned AND saved to Parquet files
df['answer'] = results
```
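For structured responses, `structured_output` accepts a Pydantic model. A minimal sketch; the `Answer` model and its fields are illustrative, not part of the library:

```python
from pydantic import BaseModel

class Answer(BaseModel):  # hypothetical schema, for illustration only
    summary: str
    confidence: float

results = await batch_process(
    df,
    llm_config=LLMConfig(
        llm_class=ChatOpenAI,
        llm_kwargs={'model': 'gpt-4'},
        structured_output=Answer,  # responses are parsed into Answer instances
    ),
)
```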
### 4. S3 Upload
For production and cloud environments:
```python
from langchain_callback_parquet_logger import ParquetLogger, S3Config
logger = ParquetLogger(
log_dir="./logs",
s3_config=S3Config(
bucket="my-llm-logs",
prefix="production/",
on_failure="error" # Fail fast in production
)
)
```
### 5. Event Type Selection
Choose what events to log:
```python
# Default: Only LLM events
logger = ParquetLogger("./logs")
# Log everything
logger = ParquetLogger(
"./logs",
event_types=['llm_start', 'llm_end', 'llm_error',
'chain_start', 'chain_end', 'chain_error',
'tool_start', 'tool_end', 'tool_error']
)
```
## Reading Your Logs
```python
import pandas as pd
import json
# Read all logs
df = pd.read_parquet("./logs")
# Parse the payload
df['data'] = df['payload'].apply(json.loads)
# Analyze token usage (the exact key path depends on the event type; see "Payload Structure" below)
df['tokens'] = df['data'].apply(lambda x: x.get('data', {}).get('outputs', {}).get('usage', {}).get('total_tokens'))
```
## v2.0 Breaking Changes
If upgrading from v1.x:
### Old (v1.x)
```python
logger = ParquetLogger(
log_dir="./logs",
s3_bucket="my-bucket",
s3_prefix="logs/",
s3_on_failure="error"
)
```
### New (v2.0)
```python
from langchain_callback_parquet_logger import ParquetLogger, S3Config
logger = ParquetLogger(
log_dir="./logs",
s3_config=S3Config(
bucket="my-bucket",
prefix="logs/",
on_failure="error"
)
)
```
### batch_process changes
- Now uses LLMConfig dataclass for LLM configuration
- Dataclass configs replace multiple parameters
- Column renamed from `logger_custom_id` to `custom_id`
- See batch processing examples above
#### Old batch_process (v1.x)
```python
await batch_process(
df,
llm=llm_instance, # or llm_class with llm_kwargs
structured_output=MyModel
)
```
#### New batch_process (v2.0)
```python
await batch_process(
df,
llm_config=LLMConfig(
llm_class=ChatOpenAI,
llm_kwargs={'model': 'gpt-4'},
model_kwargs={'top_p': 0.9}, # Additional API params
structured_output=MyModel
)
)
```
## Configuration Classes
### ParquetLogger
- `log_dir`: Where to save logs (default: "./llm_logs")
- `buffer_size`: Entries before auto-flush (default: 100)
- `s3_config`: Optional S3Config for uploads
### LLMConfig
- `llm_class`: The LangChain LLM class to instantiate (e.g., ChatOpenAI)
- `llm_kwargs`: Arguments for the LLM constructor (model, temperature, etc.)
- `model_kwargs`: Additional API parameters (top_p, frequency_penalty, etc.)
- `structured_output`: Optional Pydantic model for structured responses
### JobConfig
- `category`: Job category (default: "batch_processing")
- `subcategory`: Job subcategory (optional, default: None)
- `version`: Version string (optional, default: None)
- `environment`: Environment name (optional, default: None)
- `description`: Job description (optional, default: None)
- `metadata`: Additional metadata dict (optional, default: None)
### StorageConfig
- `output_dir`: Local directory (default: "./batch_logs")
- `path_template`: Path template for organizing files (default: "{job_category}/{job_subcategory}/v{job_version_safe}")
  - Available variables: `job_category`, `job_subcategory`, `job_version` (original), `job_version_safe` (dots replaced with underscores), `environment`, `date`
  - Example paths: `ml_training/image_classification/v2_1_0/` or `research/nlp/vunversioned/` (when no version is specified)
- `s3_config`: Optional S3Config for uploads
### S3Config
- `bucket`: S3 bucket name
- `prefix`: S3 prefix/folder (default: "langchain-logs/")
- `on_failure`: "error" or "continue" (default: "error")
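### ProcessingConfig
Fields as used in the full example above (see "Batch Processing (Full Configuration)"):
- `max_concurrency`: Maximum parallel requests
- `buffer_size`: Logger buffer size
- `show_progress`: Show a progress bar with real-time updates
- `return_exceptions`: Return exceptions instead of failing on single errors
- `return_results`: Set False for huge datasets to save memory
- `event_types`: Events to log
- `partition_on`: Partition strategy
### ColumnConfig
- `prompt`: Prompt column name (default: "prompt")
- `config`: Config column name (default: "config")
- `tools`: Tools column name (optional)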
## Advanced Usage
### Low-Level Batch Processing
If you need direct control over logging:
```python
from langchain_callback_parquet_logger import batch_run, ParquetLogger
from langchain_openai import ChatOpenAI

# Set up your own logging
with ParquetLogger('./logs') as logger:
    llm = ChatOpenAI(callbacks=[logger])

    # Use low-level batch_run (df holds your prompts, as in the examples above)
    results = await batch_run(df, llm, max_concurrency=100)
```
### Context Manager (Notebooks)
For Jupyter notebooks, use context manager for immediate writes:
```python
with ParquetLogger('./logs', buffer_size=1) as logger:
llm = ChatOpenAI(callbacks=[logger])
response = llm.invoke("Hello!")
# Logs are guaranteed to be written
```
## Log Schema
| Column | Type | Description |
|--------|------|-------------|
| `timestamp` | timestamp | Event time (UTC) |
| `run_id` | string | Unique run ID |
| `parent_run_id` | string | Parent run ID for nested calls |
| `custom_id` | string | Your custom tracking ID |
| `event_type` | string | Event type (llm_start, llm_end, etc.) |
| `logger_metadata` | string | JSON metadata |
| `payload` | string | Full event data as JSON |
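Since these are ordinary Parquet columns, quick summaries need no payload parsing. A small sketch, assuming the `./logs` directory from earlier:

```python
import pandas as pd

df = pd.read_parquet("./logs")

# Event counts per type, e.g. to spot llm_error spikes
print(df["event_type"].value_counts())

# All events for one run, in order (start, end, and errors share a run_id)
run_id = df["run_id"].iloc[0]
trace = df[df["run_id"] == run_id].sort_values("timestamp")
```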
## Payload Structure
All events use a consistent JSON structure in the payload column:
```json
{
"event_type": "llm_end",
"timestamp": "2025-09-18T10:30:00Z",
"execution": {
"run_id": "uuid-here",
"parent_run_id": "",
"custom_id": "user-123"
},
"data": {
"prompts": ["..."],
"llm_type": "openai-chat", // LangChain's native LLM type
"response": {"content": "..."},
"usage": {"total_tokens": 100}
},
"raw": {
// Complete dump of all callback arguments
// Includes all kwargs plus positional args (serialized when possible)
"response": {"generations": [...], "llm_output": {...}},
"run_id": "uuid-here",
"parent_run_id": "",
// ... all other arguments passed to the callback
}
}
```
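For reference, a short sketch that extracts a couple of fields following this structure (building on the pandas setup from "Reading Your Logs"):

```python
import json
import pandas as pd

df = pd.read_parquet("./logs")
payloads = df["payload"].apply(json.loads)

# Fields documented in the payload structure above
df["llm_type"] = payloads.apply(lambda p: p.get("data", {}).get("llm_type"))
df["total_tokens"] = payloads.apply(
    lambda p: p.get("data", {}).get("usage", {}).get("total_tokens")
)
```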
## Installation Options
```bash
# Basic
pip install langchain-callback-parquet-logger
# With S3 support
pip install "langchain-callback-parquet-logger[s3]"
# With background retrieval support (OpenAI)
pip install "langchain-callback-parquet-logger[background]"
# Everything
pip install "langchain-callback-parquet-logger[s3,background]"
```
## License
MIT
## Contributing
Pull requests welcome! Keep it simple.
## Support
[GitHub Issues](https://github.com/turbo3136/langchain-callback-parquet-logger/issues)