# MFCS (Model Function Calling Standard)
<div align="right">
<a href="README.md">English</a> |
<a href="README_CN.md">中文</a>
</div>
A Python library for handling function calling in Large Language Models (LLMs).
## Features
- Standardized management for function, memory, and agent calls
- Generate standardized prompt templates for function, memory, and agent calls
- Parse function, memory, and agent calls from LLM output (supports both sync and async streaming)
- Validate parameters and schemas for function, memory, and agent calls
- Unified result management and formatted output for multiple call types
- Async streaming support with real-time multi-type call processing
- Easy unique identifier assignment and call tracking
- Suitable for multi-agent collaboration, tool invocation, memory management, and more
- Highly extensible and integrable for various LLM application scenarios
## Installation
```bash
pip install mfcs
```
## Configuration
1. Copy `.env.example` to `.env`:
```bash
cp .env.example .env
```
2. Edit `.env` and set your environment variables:
```bash
# OpenAI API Configuration
OPENAI_API_KEY=your-api-key-here
OPENAI_API_BASE=your-api-base-url-here
```
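The example scripts are expected to read these variables at startup. To load them in your own code, one common approach (assuming the `python-dotenv` package, which is not part of MFCS itself) is:
```python
# A minimal sketch of loading .env values at runtime; python-dotenv is an
# assumed extra dependency (pip install python-dotenv).
import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the current working directory
api_key = os.environ["OPENAI_API_KEY"]
api_base = os.environ.get("OPENAI_API_BASE")  # optional custom endpoint
```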
## Example Installation
To run the example code, you need to install additional dependencies. The examples are located in the `examples` directory:
```bash
cd examples
pip install -r requirements.txt
```
## Usage
## 1. Prompt Template Generation
### 1.1 Generate Function Calling Prompt Templates
```python
from mfcs.function_prompt import FunctionPromptGenerator

# Define your function schemas
functions = [
    {
        "name": "get_weather",
        "description": "Get the current weather for a location",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "The city and state, e.g. San Francisco, CA"
                },
                "unit": {
                    "type": "string",
                    "enum": ["celsius", "fahrenheit"],
                    "description": "The unit of temperature to use",
                    "default": "celsius"
                }
            },
            "required": ["location"]
        }
    }
]

# Generate prompt template
template = FunctionPromptGenerator.generate_function_prompt(functions)
```
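The generated template is plain text meant to be injected into the model's system prompt; the same pattern applies to the memory and agent templates below. A minimal usage sketch with an OpenAI-style client (the client wiring and model name are assumptions based on the configuration section above, not part of the MFCS API):
```python
# Hypothetical usage sketch: inject the generated template as a system prompt.
# Assumes the official openai client (v1 style) and the .env values above.
import os
from openai import OpenAI

client = OpenAI(
    api_key=os.environ["OPENAI_API_KEY"],
    base_url=os.environ.get("OPENAI_API_BASE"),
)
response = client.chat.completions.create(
    model="gpt-4o",  # placeholder model name
    messages=[
        {"role": "system", "content": template},  # template from the snippet above
        {"role": "user", "content": "What's the weather in San Francisco?"},
    ],
)
print(response.choices[0].message.content)  # may contain <mfcs_call> blocks
```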
### 1.2 Memory Prompt Management
```python
from mfcs.memory_prompt import MemoryPromptGenerator

# Define memory APIs
memory_apis = [
    {
        "name": "store_preference",
        "description": "Store user preferences and settings",
        "parameters": {
            "type": "object",
            "properties": {
                "preference_type": {
                    "type": "string",
                    "description": "Type of preference to store"
                },
                "value": {
                    "type": "string",
                    "description": "Value of the preference"
                }
            },
            "required": ["preference_type", "value"]
        }
    }
]

# Generate memory prompt template
template = MemoryPromptGenerator.generate_memory_prompt(memory_apis)
```
### 1.3 Agent Prompt Management
```python
from mfcs.agent_prompt import AgentPromptGenerator

# Define agent APIs
agent_apis = [
    {
        "name": "send_result",
        "description": "Send result to a specified agent",
        "parameters": {
            "type": "object",
            "properties": {
                "content": {
                    "type": "string",
                    "description": "The content to send"
                }
            },
            "required": ["content"]
        }
    }
]

# Generate agent prompt template
template = AgentPromptGenerator.generate_agent_prompt(agent_apis)
```
## 2. Parsing and Invocation
### 2.1 Parse Function, Memory, and Agent Calls from Output
```python
from mfcs.response_parser import ResponseParser
output = """
I need to check the weather and save my preference, and also let agent_A handle the result.

<mfcs_call>
<instructions>Get the weather information for New York</instructions>
<call_id>weather_1</call_id>
<name>get_weather</name>
<parameters>
{
    "location": "New York, NY",
    "unit": "fahrenheit"
}
</parameters>
</mfcs_call>

<mfcs_memory>
<instructions>Save user preference</instructions>
<memory_id>memory_1</memory_id>
<name>store_preference</name>
<parameters>
{
    "preference_type": "weather_unit",
    "value": "fahrenheit"
}
</parameters>
</mfcs_memory>

<mfcs_agent>
<instructions>Send the weather result to agent_B</instructions>
<agent_id>agent_1</agent_id>
<name>send_result</name>
<parameters>
{
    "content": "The weather in New York is 25°F, sent to agent_B."
}
</parameters>
</mfcs_agent>
"""

parser = ResponseParser()
content, tool_calls, memory_calls, agent_calls = parser.parse_output(output)
print(f"Content: {content}")
print(f"Function calls: {tool_calls}")
print(f"Memory calls: {memory_calls}")
print(f"Agent calls: {agent_calls}")

# Explanation:
# The output includes <mfcs_call>, <mfcs_memory>, and <mfcs_agent> blocks.
# The <mfcs_agent> block's <parameters> contains only a 'content' field.
# The parser returns agent_calls for further agent-related processing.
```
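Once parsed, each entry in `tool_calls` can be routed to a local handler. A minimal dispatch sketch, assuming each parsed call exposes the `name`, `call_id`, and `arguments` fields shown in the streaming example below (the handler itself is a hypothetical stub):
```python
from mfcs.result_manager import ResultManager

def get_weather(location: str, unit: str = "celsius") -> dict:
    # Hypothetical handler: replace with a real weather lookup.
    return {"temperature": 25, "unit": unit, "location": location}

handlers = {"get_weather": get_weather}
result_manager = ResultManager()

for call in tool_calls:  # tool_calls from parse_output above
    handler = handlers.get(call.name)
    if handler is None:
        result_manager.add_tool_result(
            name=call.name,
            result={"error": f"unknown tool: {call.name}"},
            call_id=call.call_id,
        )
        continue
    result_manager.add_tool_result(
        name=call.name,
        result=handler(**call.arguments),
        call_id=call.call_id,
    )

print(result_manager.get_tool_results())
```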
### 2.2 Async Streaming Processing for Function, Memory, and Agent Calls
```python
from mfcs.response_parser import ResponseParser, ToolCall, MemoryCall, AgentCall
from mfcs.result_manager import ResultManager
import json

async def process_stream(stream):
    # `stream` is the async chunk stream returned by your LLM client
    parser = ResponseParser()
    result_manager = ResultManager()

    async for delta, call_info, reasoning_content, usage, memory_info, agent_info in parser.parse_stream_output(stream):
        # Print reasoning content if present
        if reasoning_content:
            print(f"Reasoning: {reasoning_content}")

        # Print parsed content
        if delta:
            print(f"Content: {delta.content} (finish reason: {delta.finish_reason})")

        # Handle tool calls
        if call_info and isinstance(call_info, ToolCall):
            print("\nTool Call:")
            print(f"Instructions: {call_info.instructions}")
            print(f"Call ID: {call_info.call_id}")
            print(f"Name: {call_info.name}")
            print(f"Arguments: {json.dumps(call_info.arguments, indent=2)}")
            # Simulate tool execution
            result_manager.add_tool_result(
                name=call_info.name,
                result={"status": "success", "data": f"Simulated data for {call_info.name}"},
                call_id=call_info.call_id
            )

        # Handle memory calls
        if memory_info and isinstance(memory_info, MemoryCall):
            print("\nMemory Call:")
            print(f"Instructions: {memory_info.instructions}")
            print(f"Memory ID: {memory_info.memory_id}")
            print(f"Name: {memory_info.name}")
            print(f"Arguments: {json.dumps(memory_info.arguments, indent=2)}")
            # Simulate memory operation
            result_manager.add_memory_result(
                name=memory_info.name,
                result={"status": "success"},
                memory_id=memory_info.memory_id
            )

        # Handle agent calls
        if agent_info and isinstance(agent_info, AgentCall):
            print("\nAgent Call:")
            print(f"Instructions: {agent_info.instructions}")
            print(f"Agent ID: {agent_info.agent_id}")
            print(f"Name: {agent_info.name}")
            print(f"Arguments: {json.dumps(agent_info.arguments, indent=2)}")
            # Simulate agent operation
            result_manager.add_agent_result(
                name=agent_info.name,
                result={"status": "success"},
                agent_id=agent_info.agent_id
            )

        # Print usage statistics if available
        if usage:
            print(f"Usage: {usage}")

    print("\nTool Results:")
    print(result_manager.get_tool_results())
    print("Memory Results:")
    print(result_manager.get_memory_results())
    print("Agent Results:")
    print(result_manager.get_agent_results())
```
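The `stream` argument above is whatever async chunk stream your LLM client yields. A minimal driver sketch, assuming the `AsyncOpenAI` client from the configuration section and that `parse_stream_output` consumes the raw chunk stream directly (see the scripts in `examples/` for the exact wiring):
```python
# A sketch of obtaining `stream` and running process_stream.
# The AsyncOpenAI client and model name are assumptions, not part of MFCS;
# `template` is a prompt generated as in section 1.
import asyncio
import os
from openai import AsyncOpenAI

async def main():
    client = AsyncOpenAI(
        api_key=os.environ["OPENAI_API_KEY"],
        base_url=os.environ.get("OPENAI_API_BASE"),
    )
    stream = await client.chat.completions.create(
        model="gpt-4o",  # placeholder model name
        messages=[
            {"role": "system", "content": template},
            {"role": "user", "content": "What's the weather in New York?"},
        ],
        stream=True,
    )
    await process_stream(stream)

asyncio.run(main())
```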
## 3. Result Management
### 3.1 Function, Memory, and Agent Result Management
`ResultManager` provides a unified way to handle and format results from tool calls, memory operations, and agent operations in LLM interactions, ensuring consistent output and proper cleanup.
```python
from mfcs.result_manager import ResultManager

result_manager = ResultManager()

# Store tool call results
result_manager.add_tool_result(
    name="get_weather",          # Tool name
    result={"temperature": 25},  # Tool execution result
    call_id="weather_1"          # Unique identifier for this call
)

# Store memory operation results
result_manager.add_memory_result(
    name="store_preference",      # Memory operation name
    result={"status": "success"}, # Operation result
    memory_id="memory_1"          # Unique identifier for this operation
)

# Store agent operation results
result_manager.add_agent_result(
    name="send_result",           # Agent operation name
    result={"status": "success"}, # Operation result
    agent_id="agent_1"            # Unique identifier for this operation
)

# Get formatted results for LLM consumption
tool_results = result_manager.get_tool_results()
# Output format:
# <tool_result>
# {call_id: weather_1, name: get_weather} {"temperature": 25}
# </tool_result>

memory_results = result_manager.get_memory_results()
# Output format:
# <memory_result>
# {memory_id: memory_1, name: store_preference} {"status": "success"}
# </memory_result>

agent_results = result_manager.get_agent_results()
# Output format:
# <agent_result>
# {agent_id: agent_1, name: send_result} {"status": "success"}
# </agent_result>
```
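The formatted strings are meant to go back to the model as a follow-up message so it can produce a final answer. Continuing the hypothetical chat loop from section 1.1 (the message shape is an assumption; MFCS only defines the result text format):
```python
# Feed the formatted results back to the model. `client` comes from the
# section 1.1 sketch and `output` from section 2.1; both are assumptions.
messages = [
    {"role": "system", "content": template},
    {"role": "user", "content": "What's the weather in New York?"},
    {"role": "assistant", "content": output},  # reply containing <mfcs_call> blocks
    {"role": "user", "content": tool_results + memory_results + agent_results},
]
final = client.chat.completions.create(model="gpt-4o", messages=messages)
print(final.choices[0].message.content)
```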
## Examples
### Agent Prompt Benchmark Test
Tests the complete functionality of the agent prompt, including prevention of unnecessary tool calls and validation of tool-name correctness.
To run the benchmark test:
```bash
python examples/agent_prompt_bench.py
```
### Function Calling Examples
Demonstrates basic and async function calling with MFCS.
To run the basic example:
```bash
python examples/function_calling_examples.py
```
To run the async example:
```bash
python examples/async_function_calling_examples.py
```
### Memory Function Examples
Demonstrates memory prompt usage and async memory functions.
To run the memory example:
```bash
python examples/memory_function_examples.py
```
To run the async memory example:
```bash
python examples/async_memory_function_examples.py
```
### A2A (Agent-to-Agent) Communication Examples
Demonstrates how to use MFCS for agent-to-agent communication.
To run the server example:
```bash
python examples/a2a_server_example.py
```
To run the async client example:
```bash
python examples/async_a2a_client_example.py
```
### MCP Client Examples
Demonstrates MCP client usage (sync and async).
To run the MCP client example:
```bash
python examples/mcp_client_example.py
```
To run the async MCP client example:
```bash
python examples/async_mcp_client_example.py
```
## Notes
- **Python Version Requirement**
  Async features require Python 3.8 or higher.
- **Security**
  Handle API keys and other sensitive information securely to avoid leaks.
- **API Call Implementation**
  The API calls in the example code are simulated. Replace them with your actual business logic in production.
- **Unique Identifiers**
  - Use a unique `call_id` for each function call.
  - Use a unique `memory_id` for each memory operation.
  - Use a unique `agent_id` for each agent operation.
- **Call Format Specification**
  - The `<parameters>` field of the `<mfcs_call>`, `<mfcs_memory>`, and `<mfcs_agent>` blocks must be standard JSON.
  - For consistency, the `<mfcs_agent>` block's `<parameters>` should contain only a `content` field.
- **Prompt Template and Call Rules**
  - Always generate prompt templates with the appropriate prompt generator.
  - Follow the call rules in the prompt templates so the LLM's output can be parsed and invoked correctly.
- **Result Management**
  - Use `ResultManager` to manage results from function, memory, and agent calls for unified LLM consumption and post-processing.
  - Use `get_tool_results()`, `get_memory_results()`, and `get_agent_results()` to retrieve results.
- **Error and Resource Management**
  - Handle exceptions and clean up resources in async streaming to prevent memory leaks or deadlocks (see the sketch after this list).
  - Keep error handling and resource cleanup consistent across agent, function, and memory calls.
- **Extensibility**
  To support additional call types or result formats, use the current structure as a reference and extend it.
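
For the async cleanup point above, a minimal sketch of guarding the stream, assuming an OpenAI-style `AsyncStream` whose `close()` is a coroutine (adapt to your client's API):
```python
# Hedged cleanup sketch: always release the stream, even if parsing fails.
async def run_safely(stream):
    try:
        await process_stream(stream)  # from section 2.2
    finally:
        close = getattr(stream, "close", None)
        if close is not None:
            await close()  # assumed coroutine that releases the connection
```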
## System Requirements
- Python 3.8 or higher
- Latest pip recommended for dependency installation
- Compatible with major operating systems (Windows, Linux, macOS)
- See requirements.txt for dependencies
## License
MIT License