### Python Client and Documentation
- Python client: https://pypi.org/project/h2ogpte/
- Technical API documentation: https://h2oai.github.io/h2ogpte/
- General documentation: https://docs.h2o.ai/h2ogpte-docs/
- RAG Benchmarks: [latest results](https://github.com/h2oai/enterprise-h2ogpte/blob/main/rag_benchmark/results/test_client_e2e.md) and [how to reproduce](https://github.com/h2oai/enterprise-h2ogpte/tree/main/rag_benchmark)

We recommend installing the client with the same version as the software:

```bash
pip install h2ogpte
```
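Since the client version should match the server version, it can help to check which client version is actually installed. A minimal sketch using the standard library (comparing against your server's version is left to you):

```py
from importlib.metadata import version  # Python 3.8+

# Print the installed h2ogpte client version, e.g. '1.6.10'
print(version('h2ogpte'))
```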
### API Keys and Python Client Examples
API keys are needed to programmatically connect to h2oGPTe from the Python client.

There are two kinds of API keys:

- **Global API key** allows a client to impersonate your user for all API calls.
- **Collection-specific API key** allows a client to chat only with the specified collection.

#### Global API keys
If a collection is not specified when creating a new API key,
that key is considered to be a global API key. Use global API
keys to grant full user impersonation and system-wide access
to all of your work. Anyone with access to one of your global
API keys can create, delete, or interact with any of your past,
current, and future collections, documents, chats, and settings.
The GUI offers an **Impersonate** feature under the user settings.
```py
from h2ogpte import H2OGPTE

client = H2OGPTE(
    address='https://h2ogpte.genai.h2o.ai',
    api_key='sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
)

# Create a new collection
collection_id = client.create_collection(
    name='Contracts',
    description='Paper clip supply contracts',
)

# Create documents
# Note: Done for demonstration purposes only (not usually needed)
with open('dunder_mifflin.txt', 'w') as f:
    f.write('There were 55 paper clips shipped, 22 to Scranton and 33 to Filmer.')

with open('initech.txt', 'w') as f:
    f.write('David Brent did not sign any contract with Initech.')

# Upload documents
# Many file types are supported: text/image/audio documents and archives
with open('dunder_mifflin.txt', 'rb') as f:
    dunder_mifflin = client.upload('Dunder Mifflin.txt', f)

with open('initech.txt', 'rb') as f:
    initech = client.upload('IniTech.txt', f)

# Ingest documents (creates previews, chunks, and embeddings)
client.ingest_uploads(collection_id, [dunder_mifflin, initech])

# Create a chat session
chat_session_id = client.create_chat_session(collection_id)

# Query the collection
with client.connect(chat_session_id) as session:
    reply = session.query(
        'How many paper clips were shipped to Scranton?',
        timeout=60,
    )
    print(reply.content)

    reply = session.query(
        'Did David Brent co-sign the contract with Initech?',
        timeout=60,
    )
    print(reply.content)

    # If multiple LLMs are available, route to the LLM with the best
    # price/performance below the given maximum cost
    reply = session.query(
        'Did David Brent co-sign the contract with Initech?',
        llm='auto',
        llm_args=dict(cost_controls=dict(max_cost=1e-2)),
        timeout=60,
    )
    print(reply.content)

    # Classification
    reply = session.query(
        'Did David Brent co-sign the contract with Initech?',
        llm_args=dict(
            guided_choice=['yes', 'no', 'unclear'],
        ),
        timeout=60,
    )
    print(reply.content)

    # Create custom JSON
    reply = session.query(
        'How many paper clips were shipped to Scranton?',
        llm_args=dict(
            response_format='json_object',
            guided_json={
                '$schema': 'http://json-schema.org/draft-07/schema#',
                'type': 'object',
                'properties': {'count': {'type': 'integer'}},
                'required': [
                    'count',
                ],
            },
        ),
        timeout=60,
    )
    print(reply.content)

    # Force multimodal vision mode (requires vision-capable LLMs)
    reply = session.query(
        'How many paper clips were shipped to Scranton?',
        llm_args=dict(
            enable_vision='on',
        ),
        timeout=60,
    )
    print(reply.content)

# Summarize each document
documents = client.list_documents_in_collection(collection_id, offset=0, limit=99)
for doc in documents:
    summary = client.process_document(
        document_id=doc.id,
        pre_prompt_summary='Pay attention to the following text in order to summarize.',
        prompt_summary='Write a concise summary from the text above.',
        timeout=60,
    )
    print(summary.content)

# Chat with the LLM without a collection
chat_session_id = client.create_chat_session()

with client.connect(chat_session_id) as session:
    reply = session.query(
        'Why is drinking water good for you?',
        timeout=60,
    )
    print(reply.content)
```
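With `response_format='json_object'` and `guided_json`, the reply content arrives as a JSON string constrained to the supplied schema. A minimal sketch of consuming it (the sample string below is hypothetical, showing what the example document should yield):

```py
import json

# Hypothetical reply.content from the guided_json query above
content = '{"count": 22}'
data = json.loads(content)
print(data['count'])  # 22 paper clips shipped to Scranton in the example document
```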
#### Collection-specific API keys
Use collection-specific API keys to grant external access that can only chat with
the specified collection and make related API calls. Collection-specific API
keys do not allow any other API calls, such as creating or deleting collections,
or accessing other collections or chats.
```py
from h2ogpte import H2OGPTE

client = H2OGPTE(
    address='https://h2ogpte.genai.h2o.ai',
    api_key='sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
)

# Automatically connects to the collection from the
# collection-specific API key
chat_session_id = client.create_chat_session_on_default_collection()

# Query the collection
with client.connect(chat_session_id) as session:
    reply = session.query(
        'How many paper clips were shipped to Scranton?',
        timeout=60,
    )
    print(reply.content)

    reply = session.query(
        'Did David Brent co-sign the contract with Initech?',
        timeout=60,
    )
    print(reply.content)

# Summarize each document
default_collection = client.get_default_collection()
documents = client.list_documents_in_collection(default_collection.id, offset=0, limit=99)
for doc in documents:
    summary = client.summarize_document(
        document_id=doc.id,
        timeout=60,
    )
    print(summary.content)
```
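The restriction above can be sanity-checked from the client side. The sketch below assumes the server rejects out-of-scope calls and that the rejection surfaces as an exception; the exact error type is an assumption, not something specified here:

```py
from h2ogpte import H2OGPTE

client = H2OGPTE(
    address='https://h2ogpte.genai.h2o.ai',
    api_key='sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',  # collection-specific key
)

# Creating a new collection is outside the scope of a collection-specific key,
# so this call is expected to be rejected by the server.
try:
    client.create_collection(name='Other', description='Out of scope for this key')
except Exception as e:  # assumption: the rejection is raised as an exception
    print(f'Rejected as expected: {e}')
```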