# LangSwarm-Core
LangSwarm-Core is a framework designed to support multi-agent systems using Large Language Models (LLMs). It provides utilities, memory management, logging integration, agent orchestration tools, and a factory-based approach to building robust AI ecosystems with modularity and flexibility.
## Features
- **Agent Factory**: Dynamically create and configure agents for LangChain, OpenAI, Hugging Face, and LlamaIndex.
- **Wrappers**:
  - **Memory Mixin**: Seamless in-memory or external memory integration.
  - **Logging Mixin**: Advanced logging support using LangSmith and fallback loggers.
- **Registry**: A centralized agent registry for managing and accessing agents dynamically.
- **Modular Architecture**: Easily extend the framework by implementing additional mixins, factories, or agent types.
---
## Installation
### Prerequisites
- Python 3.8 or higher
- Install dependencies:
```bash
pip install -r requirements.txt
```
### From PyPI
```bash
pip install langswarm-core
```
---
## Usage
### Quick Start
Here's an example of how to use LangSwarm-Core to create an agent and interact with it:
```python
import os

from langswarm.core.factory.agents import AgentFactory

# Set environment variables
os.environ['OPENAI_API_KEY'] = 'your-openai-api-key'

# Create a LangChain agent
agent = AgentFactory.create(
    name="example_agent",
    agent_type="langchain-openai",
    model="gpt-4"
)
# Use the agent to respond to queries
response = agent.chat("What is LangSwarm?")
print(response)
```
### Memory Integration
LangSwarm-Core supports memory out of the box. Here's how to initialize an agent with memory:
```python
import os

from langchain.memory import ConversationBufferMemory

from langswarm.core.factory.agents import AgentFactory

# Set environment variables
os.environ['OPENAI_API_KEY'] = 'your-openai-api-key'

# Define memory for the agent
memory = ConversationBufferMemory()

# Create a LangChain agent with memory using AgentFactory
agent = AgentFactory.create(
    name="memory_agent",
    agent_type="langchain-openai",
    memory=memory,
    model="gpt-4"
)
# Interact with the agent
response1 = agent.chat("What is LangSwarm-Core?")
print(f"Agent: {response1}")
response2 = agent.chat("Can you tell me more about its features?")
print(f"Agent: {response2}")
# Showcase memory retention
response3 = agent.chat("What have we discussed so far?")
print(f"Agent: {response3}")
```
#### Customizing Memory
To use a different LangChain memory type, replace `ConversationBufferMemory` with another memory class. For example:
```python
from langchain.llms import OpenAI
from langchain.memory import ConversationSummaryMemory

# Create a separate LLM for memory summarization
memory_llm = OpenAI(model="gpt-3.5-turbo", openai_api_key="your-openai-api-key")

# Define the memory module using the summarization LLM
memory = ConversationSummaryMemory(llm=memory_llm)
```
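
The summary-backed memory then drops into the factory exactly as in the previous example; a minimal sketch reusing the `AgentFactory` call shown above:

```python
# Pass the summary memory to the factory, same pattern as before
agent = AgentFactory.create(
    name="summary_agent",
    agent_type="langchain-openai",
    memory=memory,
    model="gpt-4"
)
```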
### LangSmith Integration
LangSwarm-Core supports LangSmith out of the box. Here's how to initialize an agent with LangSmith:
```python
import os

from langswarm.core.factory.agents import AgentFactory

# Set environment variables
os.environ['OPENAI_API_KEY'] = 'your-openai-api-key'

# Create a LangChain agent with LangSmith logging enabled
agent = AgentFactory.create(
    name="example_agent",
    agent_type="langchain-openai",
    langsmith_api_key="your-langsmith-api-key",
    model="gpt-4"
)
# Use the agent to respond to queries
response = agent.chat("What is LangSwarm?")
print(response)
```
### Bring your own agent
LangSwarm-Core can also wrap agents built with other frameworks. Here's how to wrap an external agent with LangSwarm's `AgentWrapper`:
#### Hugging Face
```python
from transformers import pipeline
from langswarm.core.wrappers.generic import AgentWrapper
# Step 1: Create a Hugging Face pipeline
huggingface_agent = pipeline("text-generation", model="gpt2")
# Step 2: Wrap the Hugging Face agent using LangSwarm's AgentWrapper
wrapped_agent = AgentWrapper(
    name="my_agent",
    agent=huggingface_agent,
    memory=None,  # Optional: Add memory support if needed
    is_conversational=True  # Enable conversational context if needed (only for Hugging Face)
)
# Step 3: Interact with the wrapped agent
query = "Explain the concept of modular AI frameworks."
response = wrapped_agent.chat(query)
print(f"User: {query}")
print(f"Agent: {response}")
# Step 4: Add more interactions to showcase memory retention (if enabled)
wrapped_agent.chat("Can you elaborate on the benefits of modularity?")
response = wrapped_agent.chat("What was the initial query?")
print(f"Agent: {response}")
# Step 5: Reset the memory and start a new conversation
wrapped_agent.chat("Let's reset and discuss LangSwarm-Core.", reset=True)
response = wrapped_agent.chat("What is LangSwarm-Core?")
print(f"Agent: {response}")
```
#### LangChain
```python
from langchain.llms import OpenAI
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langswarm.core.wrappers.generic import AgentWrapper

# Step 1: Create a LangChain LLM instance
llm = OpenAI(model="gpt-3.5-turbo", openai_api_key="your-openai-api-key")

# Step 2: Build a LangChain pipeline (e.g., a simple LLM chain)
template = PromptTemplate(template="What is {topic}? Explain in detail.", input_variables=["topic"])
llm_chain = LLMChain(llm=llm, prompt=template)

# Step 3: Wrap the LangChain chain using LangSwarm's AgentWrapper
wrapped_langchain_agent = AgentWrapper(
    name="langchain_agent",
    agent=llm_chain,
    memory=None,  # Optionally add memory
)
# Step 4: Interact with the wrapped LangChain agent
query = {"topic": "LangSwarm-Core"}
response = wrapped_langchain_agent.chat(query)
print(f"User: What is LangSwarm-Core?")
print(f"Agent: {response}")
# Step 5: Showcase conversational context (if memory is enabled)
wrapped_langchain_agent.chat("Can you summarize your explanation?")
response = wrapped_langchain_agent.chat("What was the topic of discussion?")
print(f"Agent: {response}")
# Step 6: Reset the agent and start a new conversation
wrapped_langchain_agent.chat("Reset and start over.", reset=True)
response = wrapped_langchain_agent.chat({"topic": "modular AI frameworks"})
print(f"Agent: {response}")
```
---
## Components
### Factory
The `AgentFactory` provides a simple interface for creating agents:
- Supports LangChain, Hugging Face, OpenAI, and LlamaIndex agents.
- Configurable with memory, logging, and other custom parameters.
### Wrappers
Wrappers extend agent capabilities:
- **MemoryMixin**: Adds memory management functionality.
- **LoggingMixin**: Integrates LangSmith for advanced logging.
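
To get a feel for how the mixin pattern extends wrappers, here is a minimal sketch; `TimingMixin`, `timed_chat`, and `last_latency` are illustrative names of our own, not part of the library:

```python
import time

from langswarm.core.wrappers.generic import AgentWrapper

class TimingMixin:
    """Hypothetical mixin: records how long each chat call takes."""

    def timed_chat(self, query, **kwargs):
        start = time.perf_counter()
        response = self.chat(query, **kwargs)  # chat() is provided by AgentWrapper
        self.last_latency = time.perf_counter() - start
        return response

class TimedAgentWrapper(TimingMixin, AgentWrapper):
    """AgentWrapper extended with an extra capability via a mixin."""
    pass
```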
### Utilities
Helper functions include:
- Token and cost estimation.
- Text processing and cleaning.
- JSON, YAML, and Python code validation.
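
For orientation, token estimation of this kind is typically built on `tiktoken`; the standalone sketch below shows the idea, not the library's own helper API:

```python
import tiktoken

def estimate_tokens(text: str, model: str = "gpt-4") -> int:
    """Rough token count for a prompt; illustrates the kind of
    estimate the utils helpers provide (their exact API may differ)."""
    encoding = tiktoken.encoding_for_model(model)
    return len(encoding.encode(text))

print(estimate_tokens("What is LangSwarm?"))
```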
### Registry
The `AgentRegistry` provides a centralized way to manage and query all agents created in the system.
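
As a hypothetical sketch of the intended usage (the module path and the `register`/`get` method names are assumptions; check `core/registry/` for the actual API):

```python
# Hypothetical usage; module path and method names are assumptions.
from langswarm.core.registry.agents import AgentRegistry

registry = AgentRegistry()
registry.register(agent)                     # assumed: store an agent under its name
same_agent = registry.get("example_agent")   # assumed: retrieve it later by name
```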
---
## Development
### Setting Up the Environment
1. Clone the repository:
```bash
git clone https://github.com/aekdahl/langswarm-core.git
cd langswarm-core
```
2. Create a virtual environment:
```bash
python3 -m venv venv
source venv/bin/activate
```
3. Install dependencies:
```bash
pip install -r requirements.txt
```
### Running Tests
Run tests located in the `tests/` directory using `pytest`:
```bash
pytest
```
---
## File Structure
- `core/wrappers/`: Contains mixins for memory and logging.
- `core/factory/`: Defines the `AgentFactory` for creating agents.
- `core/registry/`: Manages a centralized agent registry.
- `core/utils/`: Provides utility functions for validation, token management, and text processing.
---
## Contributing
We welcome contributions! To get started:
1. Fork the repository.
2. Create a feature branch.
3. Make your changes and write tests.
4. Submit a pull request.
---
## Roadmap
- Add support for additional LLM providers.
- Expand orchestration capabilities with reinforcement learning agents.
- Introduce support for dynamic task allocation and meta-agent coordination.
---
## License
This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for details.
---
## Acknowledgments
LangSwarm-Core relies on several amazing libraries, including:
- [LangChain](https://github.com/hwchase17/langchain)
- [Hugging Face Transformers](https://huggingface.co/transformers/)
- [LlamaIndex](https://github.com/jerryjliu/llama_index)
---