# VM-X SDK for Python
## Description
VM-X AI SDK client for Python
## Installation

Install with pip:

```bash
pip install vm-x-ai-sdk
```

Or with Poetry:

```bash
poetry add vm-x-ai-sdk
```
## Create VMXClient
```python
from vmxai import (
    CompletionRequest,
    RequestMessage,
    VMXClient,
)

client = VMXClient(
    domain="env-abc123.clnt.dev.vm-x.ai",  # or the VMX_DOMAIN env variable
    api_key="abc123",  # or the VMX_API_KEY env variable
)

# The default mode is streaming.
streaming_response = client.completion(
    request=CompletionRequest(
        resource="default",
        messages=[
            RequestMessage(
                role="user",
                content="Hey there!",
            )
        ],
    ),
)

for message in streaming_response:
    print(message.message, end="", flush=True)
```

The package also exports `VMXClientOAuth` for OAuth-based authentication, plus the tool-calling request types used in the examples below.
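Both settings can also come from the environment, which is what the later examples rely on when they call `VMXClient()` with no arguments. A minimal sketch (setting the variables from Python here is only to keep the snippet self-contained; normally they come from your shell or deployment config):

```python
import os

from vmxai import VMXClient

# Normally set in your shell or deployment environment.
os.environ["VMX_DOMAIN"] = "env-abc123.clnt.dev.vm-x.ai"
os.environ["VMX_API_KEY"] = "abc123"

# With both variables set, no constructor arguments are needed.
client = VMXClient()
```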
## Examples
### Non-Streaming
```python
from vmxai import (
    CompletionRequest,
    RequestMessage,
    VMXClient,
)

client = VMXClient()

# stream=False returns a single completed response instead of an iterator.
response = client.completion(
    request=CompletionRequest(
        resource="default",
        messages=[
            RequestMessage(
                role="user",
                content="Hey there!",
            )
        ],
    ),
    stream=False,
)

print(response.message)
```
### Streaming
```python
from vmxai import (
    CompletionRequest,
    RequestMessage,
    VMXClient,
)

client = VMXClient()

streaming_response = client.completion(
    request=CompletionRequest(
        resource="default",
        messages=[
            RequestMessage(
                role="user",
                content="Hey there!",
            )
        ],
    ),
)

for message in streaming_response:
    print(message.message, end="", flush=True)
```
### Tool Call
```python
from vmxai import (
    CompletionRequest,
    RequestMessage,
    RequestMessageToolCall,
    RequestMessageToolCallFunction,
    RequestToolFunction,
    RequestTools,
    VMXClient,
)

client = VMXClient()

# Function Calling
function_response = client.completion(
    request=CompletionRequest(
        resource="default",
        messages=[
            RequestMessage(
                role="user",
                content="What's the temperature in Dallas, New York and San Diego?",
            )
        ],
        tools=[
            RequestTools(
                type="function",
                function=RequestToolFunction(
                    name="get_weather",
                    description="Lookup the temperature",
                    parameters={
                        "type": "object",
                        "properties": {"city": {"description": "City you want to get the temperature for"}},
                        "required": ["city"],
                    },
                ),
            )
        ],
    ),
)

print("Function Response")
print("#" * 100)
# The model is expected to answer with tool calls; print the raw response chunks.
for message in function_response:
    print(message, end="")

print("\n" * 2)

# Function Calling Callback: send the tool results back to the model.
function_response_callback = client.completion(
    request=CompletionRequest(
        resource="default",
        messages=[
            RequestMessage(
                role="user",
                content="What's the temperature in Dallas, New York and San Diego?",
            ),
            RequestMessage(
                role="assistant",
                tool_calls=[
                    RequestMessageToolCall(
                        id="call_NLcWB6VCdG6x9UW6xrGVTTTR",
                        type="function",
                        function=RequestMessageToolCallFunction(name="get_weather", arguments='{"city": "Dallas"}'),
                    ),
                    RequestMessageToolCall(
                        id="call_6RDTuEDsaHvWr8XjwDXx4UjX",
                        type="function",
                        function=RequestMessageToolCallFunction(name="get_weather", arguments='{"city": "New York"}'),
                    ),
                    RequestMessageToolCall(
                        id="call_NsFzeGVbAWl5bor6RrUDCvTv",
                        type="function",
                        function=RequestMessageToolCallFunction(name="get_weather", arguments='{"city": "San Diego"}'),
                    ),
                ],
            ),
            RequestMessage(
                role="tool", content="The temperature in Dallas is 81F", tool_call_id="call_NLcWB6VCdG6x9UW6xrGVTTTR"
            ),
            RequestMessage(
                role="tool", content="The temperature in New York is 78F", tool_call_id="call_6RDTuEDsaHvWr8XjwDXx4UjX"
            ),
            RequestMessage(
                role="tool", content="The temperature in San Diego is 68F", tool_call_id="call_NsFzeGVbAWl5bor6RrUDCvTv"
            ),
        ],
        tools=[
            RequestTools(
                type="function",
                function=RequestToolFunction(
                    name="get_weather",
                    description="Lookup the temperature",
                    parameters={
                        "type": "object",
                        "properties": {"city": {"description": "City you want to get the temperature for"}},
                        "required": ["city"],
                    },
                ),
            )
        ],
    ),
)

print("Function Callback Response")
print("#" * 100)
for message in function_response_callback:
    print(message.message, end="")
```
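In practice the `role="tool"` results come from your own code rather than hard-coded strings. A minimal sketch of that wiring, assuming (this is an assumption, not something the SDK docs above confirm) that the first response surfaces tool calls with the same `id` / `function.name` / `function.arguments` shape as `RequestMessageToolCall`; `lookup_temperature` is a hypothetical stand-in for a real weather lookup, and `RequestMessage` comes from the imports in the example above:

```python
import json


def lookup_temperature(city: str) -> str:
    # Hypothetical local implementation of the "get_weather" tool.
    fake_db = {"Dallas": "81F", "New York": "78F", "San Diego": "68F"}
    return f"The temperature in {city} is {fake_db.get(city, 'unknown')}"


def build_tool_messages(tool_calls) -> list:
    """Turn tool calls from a response into role="tool" follow-up messages."""
    messages = []
    for call in tool_calls:
        # Arguments arrive as a JSON string, e.g. '{"city": "Dallas"}'.
        args = json.loads(call.function.arguments)
        messages.append(
            RequestMessage(
                role="tool",
                content=lookup_temperature(args["city"]),
                tool_call_id=call.id,
            )
        )
    return messages
```

The returned messages can then be appended to the conversation and sent back with a second `client.completion(...)` call, exactly as the callback example above does by hand.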
### Multi-Answer
```python
import asyncio
from typing import Iterator

from blessings import Terminal
from vmxai import (
    CompletionRequest,
    CompletionResponse,
    RequestMessage,
    VMXClient,
)

term = Terminal()
client = VMXClient()


async def print_streaming_response(response: asyncio.Task[Iterator[CompletionResponse]], term_location: int):
    """
    Print a streaming response at a specific terminal location,
    so we can demonstrate multiple streaming responses in parallel.

    Args:
        response (asyncio.Task[Iterator[CompletionResponse]]): Streaming response task
        term_location (int): Terminal row to print the response at
    """
    first = True
    with term.location(y=term_location):
        result = await response
        x = 0
        y = term_location + 3
        for message in result:
            if first:
                print("\nModel: ", message.metadata.model)
                first = False

            # Some models start with two newlines; strip them.
            if message.message.startswith("\n\n"):
                message.message = message.message[2:]

            await asyncio.sleep(0.01)
            print(term.move(y, x) + message.message)
            x += len(message.message)
            if x > term.width:
                x = 0
                y += 1


async def multi_answer():
    # Make sure the "default" resource has 3 providers configured in the VM-X Console.
    resp1, resp2, resp3 = client.completion(
        request=CompletionRequest(
            resource="default",
            messages=[
                RequestMessage(
                    role="user",
                    content="Hey there, how are you?",
                )
            ],
        ),
        multi_answer=True,
    )

    print("Multi-Answer Streaming Response")
    print("#" * 100)
    await asyncio.gather(
        *[print_streaming_response(resp1, 10), print_streaming_response(resp2, 16), print_streaming_response(resp3, 20)]
    )
    print("\n" * 7)

    resp1, resp2, resp3 = client.completion(
        request=CompletionRequest(
            resource="default",
            messages=[
                RequestMessage(
                    role="user",
                    content="Hey there, how are you?",
                )
            ],
        ),
        stream=False,
        multi_answer=True,
    )

    print("Multi-Answer Non-Streaming Response")
    print("#" * 100)

    async def _print(resp):
        result = await resp
        print(result.message, flush=True)

    await asyncio.gather(*[_print(resp1), _print(resp2), _print(resp3)])


asyncio.run(multi_answer())
```
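If you don't need the terminal-positioning demo, the same streaming multi-answer call can be consumed with a plain collector. A minimal sketch reusing `client` and the imports from the example above, under the same assumption that `multi_answer=True` yields one awaitable task per configured provider:

```python
import asyncio


async def collect(task) -> str:
    """Await one provider's streaming task and join its chunks into a string."""
    result = await task
    return "".join(message.message for message in result)


async def main():
    tasks = client.completion(
        request=CompletionRequest(
            resource="default",
            messages=[RequestMessage(role="user", content="Hey there, how are you?")],
        ),
        multi_answer=True,
    )
    answers = await asyncio.gather(*[collect(task) for task in tasks])
    for i, answer in enumerate(answers, start=1):
        print(f"--- Answer {i} ---\n{answer}")


asyncio.run(main())
```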
## [Change Log](./CHANGELOG.md)