LLM-Bridge

Name: LLM-Bridge
Version: 1.7.9
Summary: A Bridge for LLMs
Upload time: 2025-07-10 13:18:12
Requires Python: >=3.12
Keywords: llm, ai
Requirements: none recorded
# LLM Bridge

LLM Bridge is a unified Python interface for interacting with LLMs, including OpenAI, OpenAI-Azure, OpenAI-GitHub, Gemini, Claude, and Grok.

GitHub: [https://github.com/windsnow1025/LLM-Bridge](https://github.com/windsnow1025/LLM-Bridge)

PyPI: [https://pypi.org/project/LLM-Bridge/](https://pypi.org/project/LLM-Bridge/)

## Workflow and Features

1. **Message Preprocessor**: extracts text content from documents (Word, Excel, PPT, code files, PDFs) that the target model does not natively support (see the preprocessing sketch after this list).
2. **Chat Client Factory**: creates a client for the specific LLM API with the given model parameters.
    1. **Model Message Converter**: converts general messages into model-specific messages.
        1. **Media Processor**: converts media (Image, Audio, Video, PDF) that the target model natively supports into compatible formats.
3. **Chat Client**: generates streamed or non-streamed responses.
    1. **Model Thoughts**: captures and formats the model's thinking process.
    2. **Search Citations**: extracts and formats citations from search results.
    3. **Token Counter**: tracks and reports input and output token usage.
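
As a concrete illustration of step 1, here is a minimal sketch using the message types from the Quick Start below. The document URL is a placeholder, and the extraction happens in place on the message list:

```python
import asyncio

from llm_bridge import *


async def demo():
    # A user message carrying a Word document, which no listed API type
    # accepts natively; the URL is a placeholder.
    messages = [
        Message(
            role=Role.User,
            contents=[
                Content(type=ContentType.File, data="https://example.com/report.docx"),
                Content(type=ContentType.Text, data="Summarize this report."),
            ],
        )
    ]
    # Step 1: the Message Preprocessor replaces the document with its
    # extracted text, so any API type can consume it.
    await preprocess_messages(messages, "OpenAI")


asyncio.run(demo())
```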

### Model Features

The features listed represent the maximum capabilities of each API type, not necessarily those of every individual model.

| Model Type | Input Formats                  | Capabilities         | Output Formats |
|------------|--------------------------------|----------------------|----------------|
| OpenAI     | Text, Image                    | Thinking, Web Search | Text           |
| Gemini     | Text, Image, Video, Audio, PDF | Thinking, Web Search | Text, Image    |
| Claude     | Text, Image, PDF               | Thinking, Web Search | Text           |
| Grok       | Text, Image                    |                      | Text           |
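
If you need to gate inputs up front, the table encodes naturally as plain data. The sketch below is illustrative only and not part of the llm_bridge API:

```python
# Illustrative encoding of the capability table above; not part of llm_bridge.
MAX_INPUT_FORMATS: dict[str, set[str]] = {
    "OpenAI": {"Text", "Image"},
    "Gemini": {"Text", "Image", "Video", "Audio", "PDF"},
    "Claude": {"Text", "Image", "PDF"},
    "Grok": {"Text", "Image"},
}


def accepts_input(model_type: str, input_format: str) -> bool:
    """True if the API type can, at most, accept this input format."""
    return input_format in MAX_INPUT_FORMATS.get(model_type, set())


assert accepts_input("Gemini", "Audio")
assert not accepts_input("Claude", "Video")
```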

## Installation

```bash
pip install --upgrade llm_bridge
```

## Test

```bash
pytest
```
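
Note that the tests exercise the live APIs through the workflow, so running `pytest` presumably requires valid API keys in a local `.env` file (see the Main example below).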

## Quick Start

See the `./usage/` directory, which contains the runnable examples below.

### Workflow

```python
from typing import AsyncGenerator

from llm_bridge import *


async def workflow(
        api_keys: dict[str, str],
        messages: list[Message],
        model: str,
        api_type: str,
        temperature: float,
        stream: bool
) -> ChatResponse | AsyncGenerator[ChatResponse, None]:
    # Step 1: extract text from documents the target model cannot read natively
    await preprocess_messages(messages, api_type)

    # Step 2: build a client for the chosen API type with the model parameters
    chat_client = await create_chat_client(
        messages=messages,
        model=model,
        api_type=api_type,
        temperature=temperature,
        stream=stream,
        api_keys=api_keys,
    )

    # Step 3: return either an async generator of chunks or a single response
    if stream:
        return chat_client.generate_stream_response()
    else:
        return await chat_client.generate_non_stream_response()
```
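
Note the asymmetry in the return: `generate_stream_response()` is not awaited because it returns an async generator that yields `ChatResponse` chunks to be consumed with `async for`, whereas `generate_non_stream_response()` is awaited and returns a single `ChatResponse`.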

### Main

```python
import asyncio
import logging
import os
from pprint import pprint

from dotenv import load_dotenv

from llm_bridge import *
from usage.workflow import workflow

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

load_dotenv(".env")

api_keys = {
    "OPENAI_API_KEY": os.environ.get("OPENAI_API_KEY"),
    "AZURE_API_KEY": os.environ.get("AZURE_API_KEY"),
    "AZURE_API_BASE": os.environ.get("AZURE_API_BASE"),
    "GITHUB_API_KEY": os.environ.get("GITHUB_API_KEY"),
    "GEMINI_FREE_API_KEY": os.environ.get("GEMINI_FREE_API_KEY"),
    "GEMINI_PAID_API_KEY": os.environ.get("GEMINI_PAID_API_KEY"),
    "ANTHROPIC_API_KEY": os.environ.get("ANTHROPIC_API_KEY"),
    "XAI_API_KEY": os.environ.get("XAI_API_KEY"),
}

messages = [
    Message(
        role=Role.System,
        contents=[
            Content(type=ContentType.Text, data="You are a helpful assistant.")
        ]
    ),
    Message(
        role=Role.User,
        contents=[
            Content(type=ContentType.Text, data="Hello")
        ]
    ),
    Message(
        role=Role.Assistant,
        contents=[
            Content(type=ContentType.Text, data="Hello! How can I assist you today?")
        ]
    ),
    Message(
        role=Role.User,
        contents=[
            Content(type=ContentType.Text, data="Explain the concept of Occam's Razor and provide a simple, everyday example."),
            # Content(type=ContentType.Text, data="What's the weather in NYC today?"),
            # Content(type=ContentType.Text, data="Please generate an image of a cat."),
        ]
    ),
    # Message(
    #     role=Role.User,
    #     contents=[
    #         # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746208707489-image.png"),
    #         # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746209841847-A%20Tutorial%20on%20Spectral%20Clustering.pdf"),
    #         # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746212253473-file_example_MP3_700KB.mp3"),
    #         # Content(type=ContentType.File, data="https://www.windsnow1025.com/minio/windsnow/uploads/1/1746212980820-file_example_MP4_480_1_5MG.mp4"),
    #         Content(type=ContentType.Text, data="What's this?"),
    #     ]
    # ),
]
# See /llm_bridge/resources/model_prices.json for available models
# model = "gpt-4.1"
# model = "gemini-2.5-flash-preview-native-audio-dialog"
# model = "gemini-2.5-pro-exp-03-25"
model = "gemini-2.5-pro-preview-05-06"
# model = "claude-sonnet-4-0"
# api_type = "OpenAI"
# api_type = "Gemini-Free"
api_type = "Gemini-Paid"
# api_type = "Claude"
temperature = 0
stream = True


async def main():
    model_prices = get_model_prices()
    pprint(model_prices)

    input_tokens = 0
    output_tokens = 0
    response = await workflow(api_keys, messages, model, api_type, temperature, stream)
    text = ""
    if stream:
        async for chunk in response:
            pprint(chunk)
            if chunk.text:
                text += chunk.text
            # input_tokens is reported as a running total (keep the latest);
            # output_tokens arrives per chunk (accumulate)
            if chunk.input_tokens:
                input_tokens = chunk.input_tokens
            if chunk.output_tokens:
                output_tokens += chunk.output_tokens
    else:
        pprint(response)
        text = response.text
        input_tokens = response.input_tokens
        output_tokens = response.output_tokens
    total_cost = calculate_chat_cost(api_type, model, input_tokens, output_tokens)
    print(text)
    print(f'Input tokens: {input_tokens}, Output tokens: {output_tokens}, Total cost: ${total_cost}')


if __name__ == "__main__":
    asyncio.run(main())
```
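
For orientation, cost accounting of this kind is typically token counts multiplied by per-token rates. The sketch below is a minimal stand-in with made-up prices; the real rates and schema live in `/llm_bridge/resources/model_prices.json` and are read by `calculate_chat_cost`, so nothing here reflects the library's actual numbers.

```python
# Minimal, illustrative cost math with HYPOTHETICAL prices (USD per million
# tokens). The real data lives in /llm_bridge/resources/model_prices.json.
HYPOTHETICAL_PRICES: dict[str, dict[str, float]] = {
    "example-model": {"input": 1.25, "output": 10.00},
}


def estimate_cost(model: str, input_tokens: int, output_tokens: int) -> float:
    """Stand-in for calculate_chat_cost, using the made-up table above."""
    prices = HYPOTHETICAL_PRICES[model]
    return (input_tokens * prices["input"]
            + output_tokens * prices["output"]) / 1_000_000


# e.g. 1,000 input + 500 output tokens -> $0.00625 at these made-up rates
print(f"${estimate_cost('example-model', 1_000, 500):.5f}")
```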

            
