# Artifact Editor Tool Spec
```bash
pip install llama-index-tools-artifact-editor
```
The `ArtifactEditorToolSpec` is a stateful tool spec that lets you edit an artifact in memory.

Using JSON Patch operations, an LLM/agent can be prompted to create, modify, and iterate on an artifact such as a report, a piece of code, or anything else that can be represented as a Pydantic model.

The package also includes an `ArtifactMemoryBlock` that stores the artifact and injects it into the LLM/agent's memory.
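To make the JSON Patch idea concrete, here is a minimal sketch of how an RFC 6902 patch transforms a dict-shaped artifact. It uses the third-party `jsonpatch` package purely for illustration; the tool spec exposes its own patch tools to the agent, so you would not normally apply patches by hand.

```python
# Illustration only: applying an RFC 6902 JSON Patch to a dict-shaped artifact.
# The `jsonpatch` package is used here for demonstration and is not a
# dependency of the tool spec.
import jsonpatch

artifact = {
    "title": "Draft Report",
    "content": [{"type": "text", "content": "Intro paragraph."}],
}

patch = [
    {"op": "replace", "path": "/title", "value": "History of the Internet"},
    {
        "op": "add",
        "path": "/content/-",
        "value": {"type": "image", "image_url": "https://example.com/arpanet.png"},
    },
]

updated = jsonpatch.apply_patch(artifact, patch)
print(updated["title"])  # History of the Internet
```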
## Usage
Below is an example of how to use the `ArtifactEditorToolSpec` and `ArtifactMemoryBlock` to create and iterate on a report.
```python
import asyncio
from pydantic import BaseModel, Field
from typing import List, Literal, Optional, Any

from llama_index.core.agent.workflow import (
    FunctionAgent,
    AgentStream,
    ToolCallResult,
)
from llama_index.core.memory import Memory
from llama_index.tools.artifact_editor import (
    ArtifactEditorToolSpec,
    ArtifactMemoryBlock,
)
from llama_index.llms.openai import OpenAI


# Define the Artifact Pydantic Model
class TextBlock(BaseModel):
    type: Literal["text"] = "text"
    content: str = Field(description="The content of the text block")


class TableBlock(BaseModel):
    type: Literal["table"] = "table"
    headers: List[str] = Field(description="The headers of the table")
    rows: List[List[str]] = Field(description="The rows of the table")


class ImageBlock(BaseModel):
    type: Literal["image"] = "image"
    image_url: str = Field(description="The URL of the image")


class Report(BaseModel):
    """Creates an instance of a report, which is a collection of text, tables, and images."""

    title: str = Field(description="The title of the report")
    content: List[TextBlock | TableBlock | ImageBlock] = Field(
        description="The content of the report"
    )


# Initialize the tool spec and tools
tool_spec = ArtifactEditorToolSpec(Report)
tools = tool_spec.to_tool_list()

# Initialize the memory
memory = Memory.from_defaults(
    session_id="artifact_editor_01",
    memory_blocks=[ArtifactMemoryBlock(artifact_spec=tool_spec)],
    token_limit=60000,
    chat_history_token_ratio=0.7,
)

# Create the agent
agent = FunctionAgent(
    tools=tools,
    llm=OpenAI(model="o3-mini"),
    system_prompt="You are an expert in writing reports. When you write a report, I will be able to see it (and also any changes you make to it!), so there is no need to repeat it back to me once it's written.",
)


# Run the agent in a basic chat loop.
# As it runs, the artifact is updated in-memory and
# can be accessed via the `get_current_artifact` method.
async def main():
    while True:
        user_msg = input("User: ").strip()
        if user_msg.lower() in ["exit", "quit"]:
            break

        handler = agent.run(user_msg, memory=memory)
        async for ev in handler.stream_events():
            if isinstance(ev, AgentStream):
                print(ev.delta, end="", flush=True)
            elif isinstance(ev, ToolCallResult):
                print(
                    f"\n\nCalling tool: {ev.tool_name} with kwargs: {ev.tool_kwargs}"
                )

        response = await handler
        print(str(response))
        print("Current artifact: ", tool_spec.get_current_artifact())


if __name__ == "__main__":
    asyncio.run(main())
```
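Each turn of the loop prints the current artifact. If you want to persist it between sessions, a minimal sketch like the one below works, assuming `get_current_artifact()` returns an instance of your `Report` model (it may be `None` before the agent has created anything; check the return type in your installed version).

```python
# A minimal sketch for saving the artifact to disk.
# Assumes `get_current_artifact()` returns a `Report` instance or `None`.
artifact = tool_spec.get_current_artifact()
if artifact is not None:
    with open("report.json", "w") as f:
        f.write(artifact.model_dump_json(indent=2))
```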
When running this, you might initially ask the agent:
```
User: Create a fictitious report about the history of the internet
```
The agent will respond with a report whose `content` is a list of the block types defined above. Try asking it to modify the report!
```
User: Move the image to the top of the report
```
And you will get a report with the image moved to the top.
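Under the hood, an edit like this is expressed as more patch operations. The exact patch the agent emits will vary, but in RFC 6902 terms moving the third content block to the front could look like the hypothetical operation below (paths and indices are illustrative).

```python
# Illustration only: an RFC 6902 "move" operation. The agent chooses the
# actual paths; these indices are hypothetical.
move_patch = [
    {"op": "move", "from": "/content/2", "path": "/content/0"},
]
```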
Check out the documentation for more examples of [agents](https://docs.llamaindex.ai/en/stable/understanding/agent/), [memory](https://docs.llamaindex.ai/en/stable/module_guides/deploying/agents/memory/), and [tools](https://docs.llamaindex.ai/en/stable/module_guides/deploying/agents/tools/).