# InsightFinder AI SDK
A super user-friendly Python SDK for the InsightFinder AI platform. Designed for non-technical users who want powerful AI capabilities with clean, easy-to-read outputs.
## Quick Start
### Basic Setup
```python
from insightfinderai import Client
# Method 1: Provide credentials directly
client = Client(
    session_name="llm-eval-test",  # Session name - also used for project name generation
    username="your_username",      # Your username
    api_key="your_api_key",        # Your API key
    enable_chat_evaluation=True    # Optional: show evaluation results (default: True)
)
# Method 2: Use environment variables for credentials
# export INSIGHTFINDER_USERNAME="your_username"
# export INSIGHTFINDER_API_KEY="your_api_key"
client = Client(
    session_name="llm-eval-test",  # Session name
    enable_chat_evaluation=False   # Clean output without evaluations
)
```
### Simple Chat
```python
# Basic chat (no conversation history)
response = client.chat("What is artificial intelligence?")
# Access response as object
print(f"Response: {response.response}")
print(f"Prompt: {response.prompt}")
print(f"Evaluations: {response.evaluations}")
print(f"History: {response.history}")
# Print formatted output
response.print() # or print(response)
# Chat with conversation history (like ChatGPT)
response1 = client.chat("I'm learning Python", chat_history=True)
response2 = client.chat("What are lists?", chat_history=True) # Uses context from first message
# Conversation history array (ChatGPT API style)
conversation = [
    {"role": "user", "content": "Hello"},
    {"role": "assistant", "content": "Hi there! How can I help?"},
    {"role": "user", "content": "Tell me about AI"}
]
response = client.chat(conversation)
```
**Output:**
```
[Chat Response]
Trace ID : abc-123-def
Model : tinyllama
Prompt:
>> What is artificial intelligence?
Response:
>> Artificial intelligence (AI) refers to computer systems that can perform tasks typically requiring human intelligence, such as visual perception, speech recognition, decision-making, and language translation.
Evaluations:
----------------------------------------
1. Type : AnswerRelevance
Score : 4
Explanation : The response directly addresses the question about AI
2. Type : Hallucination
Score : 5
Explanation : The response contains accurate information
```
## All Features
### 1. Chat with Conversation History
```python
# Basic chat without history (default behavior)
response = client.chat("Tell me about space exploration")
# Chat with conversation history enabled
client.chat("I'm interested in machine learning", chat_history=True)
client.chat("What algorithms should I start with?", chat_history=True) # Uses previous context
# Conversation array (ChatGPT API style)
conversation = [
    {"role": "user", "content": "What's the weather like?"},
    {"role": "assistant", "content": "I don't have real-time weather data. What's your location?"},
    {"role": "user", "content": "I'm in San Francisco"}
]
response = client.chat(conversation, chat_history=True)
```
### 2. Conversation Management
```python
# Retrieve current conversation history
history = client.retrieve_chat_history()
for msg in history:
    print(f"[{msg['role'].upper()}] {msg['content']}")
# Clear conversation history
client.clear_chat_history()
# Set conversation history manually
client.set_chat_history([
    {"role": "user", "content": "Hello"},
    {"role": "assistant", "content": "Hi there!"}
])
```
### 3. Save & Load Conversations
```python
# Save conversation to file
saved_file = client.save_chat_history("my_conversation.json")
print(f"Saved to: {saved_file}")
# Save with auto-generated filename
auto_file = client.save_chat_history() # Creates timestamped file
# Load conversation from file
loaded_data = client.load_chat_history("my_conversation.json")
print(f"Loaded {loaded_data['message_count']} messages")
# Restore conversation (flexible input)
client.set_chat_history(loaded_data) # Pass full loaded data
# or
client.set_chat_history(loaded_data["conversation"]) # Pass just messages
```
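The examples above read the `message_count` and `conversation` keys, so the loaded data is a dict shaped roughly like the sketch below. Treat the exact field set as an assumption that may vary by SDK version:

```python
# Assumed shape of load_chat_history() output, inferred from the keys
# used in the examples above; other fields may also be present.
loaded_data = {
    "message_count": 2,
    "conversation": [
        {"role": "user", "content": "Hello"},
        {"role": "assistant", "content": "Hi there!"},
    ],
}
```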
### 4. Batch Chat
```python
# Process multiple questions at once (parallel - default)
prompts = [
    "What's the weather like?",
    "Tell me a joke",
    "Explain quantum physics"
]
batch_response = client.batch_chat(prompts)
# Access as object
print(f"Processed {batch_response.summary['total_chats']} chats")
for i, response in enumerate(batch_response.response):
    print(f"Response {i+1}: {response.response[:50]}...")
# Print formatted output
batch_response.print()
# Sequential processing with conversation history
conversation_prompts = [
    "Hello, my name is John",
    "What's my name?",
    "Tell me about our conversation so far"
]
# Each prompt builds on the previous response
batch_with_history = client.batch_chat(conversation_prompts, enable_history=True)
# Access conversation flow
print("Conversation flow:")
for i, response in enumerate(batch_with_history.response):
    print(f"Prompt {i+1}: {response.prompt}")
    print(f"Response: {response.response[:100]}...")
    print(f"History length: {len(response.history)}")
    print()
```
### 5. Evaluation
```python
# Evaluate any prompt-response pair
result = client.evaluate(
    prompt="What's the capital of France?",
    response="The capital of France is Paris"
)
# Access as object
print(f"Evaluations: {result.summary['total_evaluations']}")
print(f"Passed: {result.summary['passed_evaluations']}")
print(f"Failed: {result.summary['failed_evaluations']}")
# Print formatted output
result.print() # Shows evaluation breakdown
```
### 6. Batch Evaluation
```python
# Evaluate multiple prompt and response pairs efficiently
pairs = [
    ("What's 2+2?", "4"),
    ("Capital of Japan?", "Tokyo"),
    ("Tell me about AI", "AI stands for artificial intelligence")
]
results = client.batch_evaluate(pairs)
for result in results.evaluations:  # Individual EvaluationResult objects (see Object Properties below)
    print(result)
```
### 7. Safety Evaluation
```python
# Check for PII/PHI leakage and safety issues
safety_result = client.safety_evaluation("What's your social security number?")
# Access as object
print(f"Safety evaluations: {len(safety_result.evaluations)}")
print(f"Summary: {safety_result.summary}")
# Print formatted output
safety_result.print()
```
### 8. Batch Safety Evaluation
```python
# Check multiple prompts for safety
prompts = [
    "Hello there!",
    "What's your credit card number?",
    "Tell me your password"
]
batch_safety = client.batch_safety_evaluation(prompts)
# Access summary statistics
summary = batch_safety.summary
print(f"Total prompts: {summary['total_prompts']}")
print(f"Passed safety: {summary['passed_evaluations']}")
print(f"Failed safety: {summary['failed_evaluations']}")
# Print formatted output
batch_safety.print()
```
### Object Properties
All responses return rich objects with properties and methods while remaining backward compatible: printing a response still produces the formatted output.
```python
# Chat Response
response = client.chat("Hello")
response.response # AI response text
response.prompt # Original prompt
response.evaluations # Evaluation object (if enabled)
response.history # Conversation history
response.trace_id # Unique identifier
response.model # Model name
response.is_passed # True if no evaluations or all passed, False otherwise
response.print() # Formatted output
print(response) # Same as response.print() - backward compatible!
# Evaluation Response
result = client.evaluate("prompt", "response")
result.prompt # Original prompt
result.response # Response evaluated
result.evaluations # List of evaluation details
result.summary # Statistics: total, passed, failed, top_failed
result.is_passed # True if no evaluations were returned (empty = passed), False if any are present
result.print() # Formatted output
# Batch Responses
batch = client.batch_chat(["Hello", "Goodbye"])
batch.response # List of ChatResponse objects
batch.summary # Statistics: total_chats, successful, failed
batch.is_passed # True if all individual responses passed, False otherwise
batch.print() # Formatted output with summary
batch_eval = client.batch_evaluate([("Q", "A")])
batch_eval.evaluations # List of EvaluationResult objects
batch_eval.summary # Statistics: total_prompts, passed, failed, top_failed
batch_eval.is_passed # True if all evaluations passed, False otherwise
batch_eval.print() # Formatted output with summary
```
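A common pattern built on these properties is gating on `is_passed`, for example to pull out the batch items that need review. This sketch uses only the properties documented above:

```python
batch = client.batch_chat(["Hello", "Goodbye"])

# Keep only responses whose evaluations did not all pass
failed = [r for r in batch.response if not r.is_passed]
for r in failed:
    print(f"Needs review (trace {r.trace_id}): {r.prompt}")
```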
## Enhanced Chat Features
The SDK supports multiple ways to manage conversation history:
```python
# Mode 1: No history (default) - each chat is independent
response = client.chat("Hello") # chat_history=False (default)
# Mode 2: Automatic history - conversations build context
client.chat("I'm learning Python", chat_history=True)
client.chat("Explain functions", chat_history=True) # Uses Python context
# Mode 3: Manual history - full control over conversation
conversation = [
    {"role": "user", "content": "What's machine learning?"},
    {"role": "assistant", "content": "ML is a subset of AI..."},
    {"role": "user", "content": "Give me an example"}
]
response = client.chat(conversation)
```
### History Persistence
```python
# Save any conversation for later use
client.chat("Discuss quantum computing", chat_history=True)
client.chat("Explain qubits", chat_history=True)
# Save conversation
filename = client.save_chat_history("quantum_discussion.json")
# Later... load and continue
data = client.load_chat_history("quantum_discussion.json")
client.set_chat_history(data) # Restore context
client.chat("What about quantum entanglement?", chat_history=True) # Continues from where you left off
```
## Customization Options
### Environment Variables
```bash
# Set once, use everywhere
export INSIGHTFINDER_USERNAME="your_username"
export INSIGHTFINDER_API_KEY="your_api_key"
```
```python
# No need to provide credentials in code
client = Client(
    session_name="llm-eval-test"  # Session name - also used for project auto-generation
)
```
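In environments where exporting shell variables is awkward (a notebook, for instance), the same variables can be set from Python before the client is created; a minimal sketch:

```python
import os

# Same credentials as the shell exports above, set programmatically
# before the Client reads them.
os.environ["INSIGHTFINDER_USERNAME"] = "your_username"
os.environ["INSIGHTFINDER_API_KEY"] = "your_api_key"

from insightfinderai import Client

client = Client(session_name="llm-eval-test")
```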
### Evaluation Display Control
```python
# Show evaluations and safety results in chat responses
client = Client(
    session_name="llm-eval-test",  # Session name for chat and auto project generation
    enable_chat_evaluation=True
)
response = client.chat("Hello!")
# Output includes: response + evaluations + safety results
# Hide evaluations for clean output
client = Client(
    session_name="llm-eval-test",  # Session name for chat and auto project generation
    enable_chat_evaluation=False
)
response = client.chat("Hello!")
# Output includes: response only (clean and minimal)
```
### Performance Tuning
```python
# Adjust parallel workers for batch operations
batch_responses = client.batch_chat(prompts, max_workers=5)
# Enable sequential processing with conversation history
batch_with_context = client.batch_chat(prompts, enable_history=True)
# Control streaming and history
response = client.chat(
    "Hello!",
    stream=True,       # Show real-time response
    chat_history=True  # Enable conversation context
)
# Access response properties
print(f"Response: {response.response}")
print(f"History: {len(response.history)} messages")
```
### Custom Trace IDs
```python
# Use your own trace IDs for tracking
result = client.evaluate(
    prompt="Test question",
    response="Test answer",
    trace_id="my-custom-trace-123"
)
```
### Session Name Override
All main methods (`chat`, `batch_chat`, `evaluate`, `batch_evaluate`, `safety_evaluation`, `batch_safety_evaluation`) support a per-call session name override:
```python
# Initialize client with default session name
client = Client(
    session_name="default-session",
    username="user",
    api_key="key"
)
# Override session name for specific operations
response = client.chat("Hello", session_name="custom-chat-session")
eval_result = client.evaluate("prompt", "response", session_name="custom-eval-session")
safety_result = client.safety_evaluation("prompt", session_name="custom-safety-session")
# Batch operations with custom session names
batch_chat = client.batch_chat(prompts, session_name="custom-batch-session")
batch_eval = client.batch_evaluate(pairs, session_name="custom-eval-session")
batch_safety = client.batch_safety_evaluation(prompts, session_name="custom-safety-session")
# When session_name is provided, it's used to generate the project name for evaluations
# If session_name is not provided, the default session name from Client() is used
```
## Understanding Evaluations
The SDK automatically evaluates responses for:
- **Answer Relevance**: How well the response answers the question
- **Hallucination**: Whether the response contains false information
- **Logical Consistency**: How logical and coherent the response is
- **Bias**: Detection of potential bias in responses
- **PII/PHI Leakage**: Safety check for sensitive information exposure
Each evaluation includes (see the access sketch after this list):
- **Score**: Raw numeric score (as returned by the API)
- **Explanation**: Clear description of the evaluation
- **Type**: Category of evaluation performed
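For programmatic access, these fields can be read off each entry in `result.evaluations`. The exact item type isn't specified here, so this sketch assumes mapping-style access with lowercase field names; switch to attribute access if your SDK version returns objects instead:

```python
result = client.evaluate(
    prompt="What's the capital of France?",
    response="The capital of France is Paris"
)

# Assumption: each entry exposes the Type/Score/Explanation fields
# listed above under lowercase keys.
for e in result.evaluations:
    print(f"{e['type']}: score={e['score']}")
    print(f"  {e['explanation']}")
```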
## Error Handling
The SDK provides clear, user-friendly error messages:
```python
try:
    response = client.chat("")  # Empty prompt
except ValueError as e:
    print(e)  # "Prompt cannot be empty"
```
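Input validation raises `ValueError` as shown above. The exception types for network or authentication failures aren't documented here, so a defensive wrapper has to catch more broadly; a hypothetical `safe_chat` helper:

```python
def safe_chat(client, prompt, retries=2):
    """Hypothetical helper: retry transient failures around client.chat().

    Catches broadly because the SDK's network/auth exception types
    are not documented here.
    """
    for attempt in range(retries + 1):
        try:
            return client.chat(prompt)
        except ValueError:
            raise  # Bad input will not improve on retry
        except Exception as exc:
            if attempt == retries:
                raise
            print(f"Attempt {attempt + 1} failed ({exc}); retrying...")
```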
## Custom API URL
```python
# Use a custom API endpoint
client = Client(
    session_name="llm-eval-test",  # Session name for chat and auto project generation
    username="user",
    api_key="key",
    url="https://your-custom-api.com"
)
```
## Pro Tips
1. **Batch Processing**: Use batch methods for multiple requests - they run in parallel and are much faster than looping over single calls (see the timing sketch below)
2. **Stream Control**: Turn off streaming for batch operations to reduce noise
3. **Safety First**: Keep safety evaluation enabled for production use
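To see the batch speedup from tip 1 for yourself, a rough timing comparison (the numbers depend entirely on your network and model):

```python
import time

prompts = ["Tell me a joke"] * 10

start = time.perf_counter()
for p in prompts:            # Sequential: one request at a time
    client.chat(p)
sequential = time.perf_counter() - start

start = time.perf_counter()
client.batch_chat(prompts)   # Parallel workers by default
parallel = time.perf_counter() - start

print(f"Sequential: {sequential:.1f}s | Batch: {parallel:.1f}s")
```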
## Requirements
- Python 3.7+
- `requests` library (automatically installed)
## License
This project is licensed under the MIT License.