# cjm-transcription-plugin-voxtral-vllm
<!-- WARNING: THIS FILE WAS AUTOGENERATED! DO NOT EDIT! -->
## Install
``` bash
pip install cjm_transcription_plugin_voxtral_vllm
```
## Project Structure
nbs/
└── plugin.ipynb # Plugin implementation for Mistral Voxtral transcription through vLLM server
Total: 1 notebook
## Module Dependencies
``` mermaid
graph LR
plugin[plugin<br/>Voxtral VLLM Plugin]
```
No cross-module dependencies detected.
## CLI Reference
No CLI commands found in this project.
## Module Overview
Detailed documentation for each module in the project:
### Voxtral VLLM Plugin (`plugin.ipynb`)
> Plugin implementation for Mistral Voxtral transcription through vLLM
> server
#### Import
``` python
from cjm_transcription_plugin_voxtral_vllm.plugin import (
VLLMServer,
VoxtralVLLMPlugin
)
```
#### Functions
``` python
@patch
def supports_streaming(
self: VoxtralVLLMPlugin # The plugin instance
) -> bool: # True if streaming is supported
"Check if this plugin supports streaming transcription."
```
``` python
@patch
def execute_stream(
self: VoxtralVLLMPlugin, # The plugin instance
audio: Union[AudioData, str, Path], # Audio data or path to audio file
**kwargs # Additional plugin-specific parameters
) -> Generator[str, None, TranscriptionResult]: # Yields text chunks, returns final result
"Stream transcription results chunk by chunk."
```
#### Classes
``` python
class VLLMServer:
    "vLLM server manager for Voxtral models."
    def __init__(
        self,
        model: str = "mistralai/Voxtral-Mini-3B-2507", # Model name to serve
        port: int = 8000, # Port for the server
        host: str = "0.0.0.0", # Host address to bind to
        gpu_memory_utilization: float = 0.85, # Fraction of GPU memory to use
        log_level: str = "INFO", # Logging level (DEBUG, INFO, WARNING, ERROR)
        capture_logs: bool = True, # Whether to capture and display server logs
        **kwargs # Additional vLLM server arguments
    )
def add_log_callback(
self,
callback: Callable[[str], None] # Function that receives log line strings
) -> None: # Returns nothing
"Add a callback function to receive each log line."
def start(
self,
wait_for_ready: bool = True, # Wait for server to be ready before returning
timeout: int = 120, # Maximum seconds to wait for server readiness
show_progress: bool = True # Show progress indicators during startup
) -> None: # Returns nothing
"Start the vLLM server."
    def stop(self) -> None: # Returns nothing
        "Stop the vLLM server."
def restart(self) -> None: # Returns nothing
"""Restart the server."""
self.stop()
time.sleep(2)
self.start()
    def is_running(self) -> bool: # True if server is running and responsive
        "Check if server is running and responsive."
def get_recent_logs(
self,
n: int = 100 # Number of recent log lines to retrieve
) -> List[str]: # List of recent log lines
"Get the most recent n log lines."
    def get_metrics_from_logs(self) -> dict: # Dictionary with performance metrics
        "Parse recent logs to extract performance metrics."
def tail_logs(
self,
follow: bool = True, # Continue displaying new logs as they arrive
n: int = 10 # Number of initial lines to display
) -> None: # Returns nothing
"Tail the server logs (similar to tail -f)."
```
``` python
class VoxtralVLLMPlugin:
    "Mistral Voxtral transcription plugin via vLLM server."
    def __init__(self):
        "Initialize the Voxtral VLLM plugin with default configuration."
def name(self) -> str: # The plugin name identifier
"""Get the plugin name identifier."""
return "voxtral_vllm"
    @property
    def version(self) -> str: # The plugin version string
        "Get the plugin version string."
    @property
    def supported_formats(self) -> List[str]: # List of supported audio formats
        "Get the list of supported audio file formats."
    @staticmethod
    def get_config_schema() -> Dict[str, Any]: # Configuration schema dictionary
        "Return configuration schema for Voxtral VLLM."
def get_current_config(self) -> Dict[str, Any]: # Current configuration dictionary
"""Return current configuration."""
defaults = self.get_config_defaults()
return {**defaults, **self.config}
    def initialize(
        self,
        config: Optional[Dict[str, Any]] = None # Configuration dictionary to initialize the plugin
    ) -> None: # Returns nothing
        "Initialize the plugin with configuration."
def execute(
self,
audio: Union[AudioData, str, Path], # Audio data or path to audio file to transcribe
**kwargs # Additional arguments to override config
) -> TranscriptionResult: # Transcription result with text and metadata
"Transcribe audio using Voxtral via vLLM."
    def is_available(self) -> bool: # True if vLLM and dependencies are available
        "Check if vLLM and required dependencies are available."
    def cleanup(self) -> None: # Returns nothing
        "Clean up resources."
```
Raw data
{
"_id": null,
"home_page": "https://github.com/cj-mills/cjm-transcription-plugin-voxtral-vllm",
"name": "cjm-transcription-plugin-voxtral-vllm",
"maintainer": null,
"docs_url": null,
"requires_python": ">=3.11",
"maintainer_email": null,
"keywords": "nbdev jupyter notebook python",
"author": "Christian J. Mills",
"author_email": "9126128+cj-mills@users.noreply.github.com",
"download_url": "https://files.pythonhosted.org/packages/be/58/6f680c8c72b9c203c575df6e6421c06504e28abffd0625558aa7d0d3bb70/cjm_transcription_plugin_voxtral_vllm-0.0.4.tar.gz",
"platform": null,
"description": "# cjm-transcription-plugin-voxtral-vllm\n\n\n<!-- WARNING: THIS FILE WAS AUTOGENERATED! DO NOT EDIT! -->\n\n## Install\n\n``` bash\npip install cjm_transcription_plugin_voxtral_vllm\n```\n\n## Project Structure\n\n nbs/\n \u2514\u2500\u2500 plugin.ipynb # Plugin implementation for Mistral Voxtral transcription through vLLM server\n\nTotal: 1 notebook\n\n## Module Dependencies\n\n``` mermaid\ngraph LR\n plugin[plugin<br/>Voxtral VLLM Plugin]\n```\n\nNo cross-module dependencies detected.\n\n## CLI Reference\n\nNo CLI commands found in this project.\n\n## Module Overview\n\nDetailed documentation for each module in the project:\n\n### Voxtral VLLM Plugin (`plugin.ipynb`)\n\n> Plugin implementation for Mistral Voxtral transcription through vLLM\n> server\n\n#### Import\n\n``` python\nfrom cjm_transcription_plugin_voxtral_vllm.plugin import (\n VLLMServer,\n VoxtralVLLMPlugin\n)\n```\n\n#### Functions\n\n``` python\n@patch\ndef supports_streaming(\n self: VoxtralVLLMPlugin # The plugin instance\n) -> bool: # True if streaming is supported\n \"Check if this plugin supports streaming transcription.\"\n```\n\n``` python\n@patch\ndef execute_stream(\n self: VoxtralVLLMPlugin, # The plugin instance\n audio: Union[AudioData, str, Path], # Audio data or path to audio file\n **kwargs # Additional plugin-specific parameters\n) -> Generator[str, None, TranscriptionResult]: # Yields text chunks, returns final result\n \"Stream transcription results chunk by chunk.\"\n```\n\n#### Classes\n\n``` python\nclass VLLMServer:\n def __init__(\n self,\n model: str = \"mistralai/Voxtral-Mini-3B-2507\", # Model name to serve\n port: int = 8000, # Port for the server\n host: str = \"0.0.0.0\", # Host address to bind to\n gpu_memory_utilization: float = 0.85, # Fraction of GPU memory to use\n log_level: str = \"INFO\", # Logging level (DEBUG, INFO, WARNING, ERROR)\n capture_logs: bool = True, # Whether to capture and display server logs\n **kwargs # Additional vLLM server 
arguments\n )\n \"vLLM server manager for Voxtral models.\"\n \n def __init__(\n self,\n model: str = \"mistralai/Voxtral-Mini-3B-2507\", # Model name to serve\n port: int = 8000, # Port for the server\n host: str = \"0.0.0.0\", # Host address to bind to\n gpu_memory_utilization: float = 0.85, # Fraction of GPU memory to use\n log_level: str = \"INFO\", # Logging level (DEBUG, INFO, WARNING, ERROR)\n capture_logs: bool = True, # Whether to capture and display server logs\n **kwargs # Additional vLLM server arguments\n )\n \n def add_log_callback(\n self, \n callback: Callable[[str], None] # Function that receives log line strings\n ) -> None: # Returns nothing\n \"Add a callback function to receive each log line.\"\n \n def start(\n self, \n wait_for_ready: bool = True, # Wait for server to be ready before returning\n timeout: int = 120, # Maximum seconds to wait for server readiness\n show_progress: bool = True # Show progress indicators during startup\n ) -> None: # Returns nothing\n \"Start the vLLM server.\"\n \n def stop(self) -> None: # Returns nothing\n \"\"\"Stop the vLLM server.\"\"\"\n if self.process and self.process.poll() is None\n \"Stop the vLLM server.\"\n \n def restart(self) -> None: # Returns nothing\n \"\"\"Restart the server.\"\"\"\n self.stop()\n time.sleep(2)\n self.start()\n \n def is_running(self) -> bool: # True if server is running and responsive\n \"Restart the server.\"\n \n def is_running(self) -> bool: # True if server is running and responsive\n \"Check if server is running and responsive.\"\n \n def get_recent_logs(\n self, \n n: int = 100 # Number of recent log lines to retrieve\n ) -> List[str]: # List of recent log lines\n \"Get the most recent n log lines.\"\n \n def get_metrics_from_logs(self) -> dict: # Dictionary with performance metrics\n \"\"\"Parse recent logs to extract performance metrics.\"\"\"\n metrics = {\n \"prompt_throughput\": 0.0,\n \"Parse recent logs to extract performance metrics.\"\n \n def tail_logs(\n self, 
\n follow: bool = True, # Continue displaying new logs as they arrive\n n: int = 10 # Number of initial lines to display\n ) -> None: # Returns nothing\n \"Tail the server logs (similar to tail -f).\"\n```\n\n``` python\nclass VoxtralVLLMPlugin:\n def __init__(self):\n \"\"\"Initialize the Voxtral VLLM plugin with default configuration.\"\"\"\n self.logger = logging.getLogger(f\"{__name__}.{type(self).__name__}\")\n self.config = {}\n self.server: Optional[VLLMServer] = None\n \"Mistral Voxtral transcription plugin via vLLM server.\"\n \n def __init__(self):\n \"\"\"Initialize the Voxtral VLLM plugin with default configuration.\"\"\"\n self.logger = logging.getLogger(f\"{__name__}.{type(self).__name__}\")\n self.config = {}\n self.server: Optional[VLLMServer] = None\n \"Initialize the Voxtral VLLM plugin with default configuration.\"\n \n def name(self) -> str: # The plugin name identifier\n \"\"\"Get the plugin name identifier.\"\"\"\n return \"voxtral_vllm\"\n \n @property\n def version(self) -> str: # The plugin version string\n \"Get the plugin name identifier.\"\n \n def version(self) -> str: # The plugin version string\n \"\"\"Get the plugin version string.\"\"\"\n return \"1.0.0\"\n \n @property\n def supported_formats(self) -> List[str]: # List of supported audio formats\n \"Get the plugin version string.\"\n \n def supported_formats(self) -> List[str]: # List of supported audio formats\n \"\"\"Get the list of supported audio file formats.\"\"\"\n return [\"wav\", \"mp3\", \"flac\", \"m4a\", \"ogg\", \"webm\", \"mp4\", \"avi\", \"mov\"]\n \n @staticmethod\n def get_config_schema() -> Dict[str, Any]: # Configuration schema dictionary\n \"Get the list of supported audio file formats.\"\n \n def get_config_schema() -> Dict[str, Any]: # Configuration schema dictionary\n \"\"\"Return configuration schema for Voxtral VLLM.\"\"\"\n return {\n \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n \"Return configuration schema for Voxtral VLLM.\"\n \n def 
get_current_config(self) -> Dict[str, Any]: # Current configuration dictionary\n \"\"\"Return current configuration.\"\"\"\n defaults = self.get_config_defaults()\n return {**defaults, **self.config}\n \n def initialize(\n self,\n config: Optional[Dict[str, Any]] = None # Configuration dictionary to initialize the plugin\n ) -> None: # Returns nothing\n \"Return current configuration.\"\n \n def initialize(\n self,\n config: Optional[Dict[str, Any]] = None # Configuration dictionary to initialize the plugin\n ) -> None: # Returns nothing\n \"Initialize the plugin with configuration.\"\n \n def execute(\n self,\n audio: Union[AudioData, str, Path], # Audio data or path to audio file to transcribe\n **kwargs # Additional arguments to override config\n ) -> TranscriptionResult: # Transcription result with text and metadata\n \"Transcribe audio using Voxtral via vLLM.\"\n \n def is_available(self) -> bool: # True if vLLM and dependencies are available\n \"\"\"Check if vLLM and required dependencies are available.\"\"\"\n if not OPENAI_AVAILABLE\n \"Check if vLLM and required dependencies are available.\"\n \n def cleanup(self) -> None: # Returns nothing\n \"\"\"Clean up resources.\"\"\"\n self.logger.info(\"Cleaning up Voxtral VLLM plugin\")\n \n # Stop managed server if running\n if self.config.get(\"server_mode\") == \"managed\" and self.server\n \"Clean up resources.\"\n```\n",
"bugtrack_url": null,
"license": "Apache Software License 2.0",
"summary": "Mistral Voxtral plugin for the cjm-transcription-plugin-system library - provides local speech-to-text transcription through vLLM with configurable model selection and parameter control.",
"version": "0.0.4",
"project_urls": {
"Homepage": "https://github.com/cj-mills/cjm-transcription-plugin-voxtral-vllm"
},
"split_keywords": [
"nbdev",
"jupyter",
"notebook",
"python"
],
"urls": [
{
"comment_text": null,
"digests": {
"blake2b_256": "dc8ae556876d8a4aa7fc6feec7b2ad43d9b012ff6119d2cdae420e87b23ad57d",
"md5": "7207bd7f2fa65b9b96ace8e8d84b85fc",
"sha256": "ff18e082e6633b99f271b441cde52bd2b4ae60e17d296a12f5feda1f18ba48d1"
},
"downloads": -1,
"filename": "cjm_transcription_plugin_voxtral_vllm-0.0.4-py3-none-any.whl",
"has_sig": false,
"md5_digest": "7207bd7f2fa65b9b96ace8e8d84b85fc",
"packagetype": "bdist_wheel",
"python_version": "py3",
"requires_python": ">=3.11",
"size": 17468,
"upload_time": "2025-10-25T00:55:08",
"upload_time_iso_8601": "2025-10-25T00:55:08.864304Z",
"url": "https://files.pythonhosted.org/packages/dc/8a/e556876d8a4aa7fc6feec7b2ad43d9b012ff6119d2cdae420e87b23ad57d/cjm_transcription_plugin_voxtral_vllm-0.0.4-py3-none-any.whl",
"yanked": false,
"yanked_reason": null
},
{
"comment_text": null,
"digests": {
"blake2b_256": "be586f680c8c72b9c203c575df6e6421c06504e28abffd0625558aa7d0d3bb70",
"md5": "5004ca13499876728379838840987931",
"sha256": "9a84b6a442273cc027ee745ed99b8b9fe61f3e5121ea2918386a331a453d50a0"
},
"downloads": -1,
"filename": "cjm_transcription_plugin_voxtral_vllm-0.0.4.tar.gz",
"has_sig": false,
"md5_digest": "5004ca13499876728379838840987931",
"packagetype": "sdist",
"python_version": "source",
"requires_python": ">=3.11",
"size": 18848,
"upload_time": "2025-10-25T00:55:09",
"upload_time_iso_8601": "2025-10-25T00:55:09.773219Z",
"url": "https://files.pythonhosted.org/packages/be/58/6f680c8c72b9c203c575df6e6421c06504e28abffd0625558aa7d0d3bb70/cjm_transcription_plugin_voxtral_vllm-0.0.4.tar.gz",
"yanked": false,
"yanked_reason": null
}
],
"upload_time": "2025-10-25 00:55:09",
"github": true,
"gitlab": false,
"bitbucket": false,
"codeberg": false,
"github_user": "cj-mills",
"github_project": "cjm-transcription-plugin-voxtral-vllm",
"travis_ci": false,
"coveralls": false,
"github_actions": true,
"lcname": "cjm-transcription-plugin-voxtral-vllm"
}