mlchain-client

Name: mlchain-client
Version: 0.1.12
Home page: https://github.com/mlchain/mlchain
Summary: A package for interacting with the Mlchain Service-API
Upload time: 2024-11-26 11:51:07
Author: Mlchain
Requires Python: >=3.6
License: MIT
Keywords: mlchain, nlp, ai, language-processing
# mlchain-client

A Mlchain App Service-API client, used to build web apps that call the Service-API.

## Usage

First, install the `mlchain-client` Python SDK package:

```bash
pip install mlchain-client
```

Then write your code with the SDK:

- Completion generation with the `blocking` response_mode

```python
from mlchain_client import CompletionClient

api_key = "your_api_key"

# Initialize CompletionClient
completion_client = CompletionClient(api_key)

# Create Completion Message using CompletionClient
completion_response = completion_client.create_completion_message(inputs={"query": "What's the weather like today?"},
                                                                  response_mode="blocking", user="user_id")
completion_response.raise_for_status()

result = completion_response.json()

print(result.get('answer'))
```
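
The examples above suggest the client returns standard `requests` response objects (hence the `raise_for_status()` and `.json()` calls), so ordinary `requests` error handling applies. A minimal sketch, assuming you prefer to catch failures rather than let them propagate:

```python
import requests
from mlchain_client import CompletionClient

completion_client = CompletionClient("your_api_key")

try:
    response = completion_client.create_completion_message(
        inputs={"query": "What's the weather like today?"},
        response_mode="blocking",
        user="user_id",
    )
    response.raise_for_status()  # raises requests.exceptions.HTTPError on 4xx/5xx
    print(response.json().get("answer"))
except requests.exceptions.RequestException as exc:
    # RequestException covers HTTP errors as well as connection and timeout failures
    print(f"Request failed: {exc}")
```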

- Completion with a vision model, such as gpt-4-vision

```python
from mlchain_client import CompletionClient

api_key = "your_api_key"

# Initialize CompletionClient
completion_client = CompletionClient(api_key)

files = [{
    "type": "image",
    "transfer_method": "remote_url",
    "url": "your_image_url"
}]

# files = [{
#     "type": "image",
#     "transfer_method": "local_file",
#     "upload_file_id": "your_file_id"
# }]

# Create Completion Message using CompletionClient
completion_response = completion_client.create_completion_message(inputs={"query": "Describe the picture."},
                                                                  response_mode="blocking", user="user_id", files=files)
completion_response.raise_for_status()

result = completion_response.json()

print(result.get('answer'))
```

- Chat generation with the `streaming` response_mode

```python
import json
from mlchain_client import ChatClient

api_key = "your_api_key"

# Initialize ChatClient
chat_client = ChatClient(api_key)

# Create Chat Message using ChatClient
chat_response = chat_client.create_chat_message(inputs={}, query="Hello", user="user_id", response_mode="streaming")
chat_response.raise_for_status()

for line in chat_response.iter_lines(decode_unicode=True):
    line = line.split('data:', 1)[-1]
    if line.strip():
        line = json.loads(line.strip())
        print(line.get('answer'))
```
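
If you consume streaming responses in more than one place, the parsing in the loop above can be factored into a small generator. The hypothetical `iter_answers` helper below is just a convenience wrapper around the same `data:`-prefixed JSON lines; it is not part of the SDK:

```python
import json
from mlchain_client import ChatClient

def iter_answers(streaming_response):
    """Yield the 'answer' field from each `data:` line of a streaming response."""
    for line in streaming_response.iter_lines(decode_unicode=True):
        line = line.split("data:", 1)[-1].strip()
        if not line:
            continue
        chunk = json.loads(line)
        answer = chunk.get("answer")
        if answer is not None:
            yield answer

chat_client = ChatClient("your_api_key")
response = chat_client.create_chat_message(
    inputs={}, query="Hello", user="user_id", response_mode="streaming"
)
response.raise_for_status()

for answer in iter_answers(response):
    print(answer, end="", flush=True)
```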

- Chat with a vision model, such as gpt-4-vision

```python
from mlchain_client import ChatClient

api_key = "your_api_key"

# Initialize ChatClient
chat_client = ChatClient(api_key)

files = [{
    "type": "image",
    "transfer_method": "remote_url",
    "url": "your_image_url"
}]

# files = [{
#     "type": "image",
#     "transfer_method": "local_file",
#     "upload_file_id": "your_file_id"
# }]

# Create Chat Message using ChatClient
chat_response = chat_client.create_chat_message(inputs={}, query="Describe the picture.", user="user_id",
                                                response_mode="blocking", files=files)
chat_response.raise_for_status()

result = chat_response.json()

print(result.get("answer"))
```

- Upload a file for use with a vision model

```python
from mlchain_client import MlchainClient

api_key = "your_api_key"

# Initialize Client
mlchain_client = MlchainClient(api_key)

file_path = "your_image_file_path"
file_name = "panda.jpeg"
mime_type = "image/jpeg"

with open(file_path, "rb") as file:
    files = {
        "file": (file_name, file, mime_type)
    }
    response = mlchain_client.file_upload("user_id", files)

    result = response.json()
    print(f'upload_file_id: {result.get("id")}')
```
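
The returned `id` can then be passed as `upload_file_id` with the `local_file` transfer method shown (commented out) in the vision examples above. A minimal end-to-end sketch, assuming the upload response exposes the file id under `id` as printed above:

```python
from mlchain_client import ChatClient, MlchainClient

api_key = "your_api_key"

mlchain_client = MlchainClient(api_key)
chat_client = ChatClient(api_key)

# Step 1: upload the local image
with open("your_image_file_path", "rb") as file:
    upload_response = mlchain_client.file_upload(
        "user_id", {"file": ("panda.jpeg", file, "image/jpeg")}
    )
    upload_response.raise_for_status()
    upload_file_id = upload_response.json().get("id")

# Step 2: reference the uploaded file via the local_file transfer method
files = [{
    "type": "image",
    "transfer_method": "local_file",
    "upload_file_id": upload_file_id,
}]

chat_response = chat_client.create_chat_message(
    inputs={}, query="Describe the picture.", user="user_id",
    response_mode="blocking", files=files,
)
chat_response.raise_for_status()
print(chat_response.json().get("answer"))
```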
  


- Other APIs: application parameters, conversations, and messages

```python
from mlchain_client import ChatClient

api_key = "your_api_key"

# Initialize Client
client = ChatClient(api_key)

# Get App parameters
parameters = client.get_application_parameters(user="user_id")
parameters.raise_for_status()

print('[parameters]')
print(parameters.json())

# Get Conversation List (only for chat)
conversations = client.get_conversations(user="user_id")
conversations.raise_for_status()

print('[conversations]')
print(conversations.json())

# Get Message List (only for chat)
messages = client.get_conversation_messages(user="user_id", conversation_id="conversation_id")
messages.raise_for_status()

print('[messages]')
print(messages.json())

# Rename Conversation (only for chat)
rename_conversation_response = client.rename_conversation(conversation_id="conversation_id",
                                                          name="new_name", user="user_id")
rename_conversation_response.raise_for_status()

print('[rename result]')
print(rename_conversation_response.json())
```
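
These endpoints can also be chained, for example to list conversations and then fetch the messages of each one. The response payloads are not documented in this README, so the sketch below assumes the list endpoints return a JSON object with a `data` array; adjust the keys to match the actual responses:

```python
from mlchain_client import ChatClient

client = ChatClient("your_api_key")
user = "user_id"

conversations = client.get_conversations(user=user)
conversations.raise_for_status()

# Assumed shape: {"data": [{"id": ..., "name": ...}, ...]}
for conversation in conversations.json().get("data", []):
    print(f"[{conversation.get('name')}]")
    messages = client.get_conversation_messages(
        user=user, conversation_id=conversation.get("id")
    )
    messages.raise_for_status()
    for message in messages.json().get("data", []):
        print(message)
```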

            
