qwen-vl-utils

Name: qwen-vl-utils
Version: 0.0.9
Summary: Qwen Vision Language Model Utils - PyTorch
Author: Qwen Team <chenkeqin.ckq@alibaba-inc.com>
License: Apache-2.0
Requires Python: >=3.8
Keywords: large language model, pytorch, qwen-vl, vision language model
Upload time: 2025-01-26 07:11:40
Homepage: https://github.com/QwenLM/Qwen2-VL/tree/main/qwen-vl-utils
Repository: https://github.com/QwenLM/Qwen2-VL.git
Issues: https://github.com/QwenLM/Qwen2-VL/issues

# qwen-vl-utils

Qwen-VL Utils contains a set of helper functions for processing and integrating visual-language information with the Qwen-VL series models.

## Install

```bash
pip install qwen-vl-utils
```
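
As a quick post-install check, the package's main entry point, used throughout the examples below, should import cleanly:

```python
# Smoke test: the helper demonstrated in the Usage section should resolve after install.
from qwen_vl_utils import process_vision_info

print(process_vision_info)  # <function process_vision_info ...>
```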

## Usage

### Qwen2VL

```python
from PIL import Image
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor

from qwen_vl_utils import process_vision_info


# Placeholder inputs so the snippet runs as-is; swap in your own checkpoint and image.
model_path = "Qwen/Qwen2-VL-7B-Instruct"  # any Qwen2-VL checkpoint works here
pil_image = Image.open("/path/to/your/image.jpg")

# You can directly insert a local file path, a URL, or a base64-encoded image
# at the position you want it in the text.
messages = [
    # Image
    ## Local file path
    [{"role": "user", "content": [{"type": "image", "image": "file:///path/to/your/image.jpg"}, {"type": "text", "text": "Describe this image."}]}],
    ## Image URL
    [{"role": "user", "content": [{"type": "image", "image": "http://path/to/your/image.jpg"}, {"type": "text", "text": "Describe this image."}]}],
    ## Base64 encoded image
    [{"role": "user", "content": [{"type": "image", "image": "data:image;base64,/9j/..."}, {"type": "text", "text": "Describe this image."}]}],
    ## PIL.Image.Image
    [{"role": "user", "content": [{"type": "image", "image": pil_image}, {"type": "text", "text": "Describe this image."}]}],
    ## The model dynamically adjusts the image size; specify dimensions if required.
    [{"role": "user", "content": [{"type": "image", "image": "file:///path/to/your/image.jpg", "resized_height": 280, "resized_width": 420}, {"type": "text", "text": "Describe this image."}]}],
    # Video
    ## Local video path
    [{"role": "user", "content": [{"type": "video", "video": "file:///path/to/video1.mp4"}, {"type": "text", "text": "Describe this video."}]}],
    ## Local video frames
    [{"role": "user", "content": [{"type": "video", "video": ["file:///path/to/extracted_frame1.jpg", "file:///path/to/extracted_frame2.jpg", "file:///path/to/extracted_frame3.jpg"],}, {"type": "text", "text": "Describe this video."},],}],
    ## The model dynamically adjusts the number of frames (nframes) and the video height/width; specify these arguments if required.
    [{"role": "user", "content": [{"type": "video", "video": "file:///path/to/video1.mp4", "fps": 2.0, "resized_height": 280, "resized_width": 280}, {"type": "text", "text": "Describe this video."}]}],
]

processor = AutoProcessor.from_pretrained(model_path)
model = Qwen2VLForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto", device_map="auto")
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
images, videos = process_vision_info(messages)
inputs = processor(text=text, images=images, videos=videos, padding=True, return_tensors="pt")
inputs = inputs.to(model.device)  # move tensors to the model's device before generating
print(inputs)
generated_ids = model.generate(**inputs)
print(generated_ids)
```
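
The example above prints raw token ids. Below is a minimal follow-up sketch for decoding them into text, reusing `inputs`, `generated_ids`, and `processor` from the block above; the prompt-trimming idiom follows the pattern shown in the Qwen2-VL model card:

```python
# Drop the prompt tokens from each sequence so only newly generated tokens remain,
# then decode them into plain strings.
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)  # one decoded string per conversation in the batch
```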

### Qwen2.5VL

```python
from PIL import Image
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor

from qwen_vl_utils import process_vision_info


# You can cap the token budget for a video through the environment variable
# VIDEO_MAX_PIXELS, based on the maximum number of tokens the model can accept,
# e.g. VIDEO_MAX_PIXELS = 32000 * 28 * 28 * 0.9 (see the sketch after this example;
# a plain shell `export` cannot evaluate this arithmetic).


# Placeholder inputs so the snippet runs as-is; swap in your own checkpoint and image.
model_path = "Qwen/Qwen2.5-VL-7B-Instruct"  # any Qwen2.5-VL checkpoint works here
pil_image = Image.open("/path/to/your/image.jpg")

# You can directly insert a local file path, a URL, or a base64-encoded image
# at the position you want it in the text.
messages = [
    # Image
    ## Local file path
    [{"role": "user", "content": [{"type": "image", "image": "file:///path/to/your/image.jpg"}, {"type": "text", "text": "Describe this image."}]}],
    ## Image URL
    [{"role": "user", "content": [{"type": "image", "image": "http://path/to/your/image.jpg"}, {"type": "text", "text": "Describe this image."}]}],
    ## Base64 encoded image
    [{"role": "user", "content": [{"type": "image", "image": "data:image;base64,/9j/..."}, {"type": "text", "text": "Describe this image."}]}],
    ## PIL.Image.Image
    [{"role": "user", "content": [{"type": "image", "image": pil_image}, {"type": "text", "text": "Describe this image."}]}],
    ## The model dynamically adjusts the image size; specify dimensions if required.
    [{"role": "user", "content": [{"type": "image", "image": "file:///path/to/your/image.jpg", "resized_height": 280, "resized_width": 420}, {"type": "text", "text": "Describe this image."}]}],
    # Video
    ## Local video path
    [{"role": "user", "content": [{"type": "video", "video": "file:///path/to/video1.mp4"}, {"type": "text", "text": "Describe this video."}]}],
    ## Local video frames
    [{"role": "user", "content": [{"type": "video", "video": ["file:///path/to/extracted_frame1.jpg", "file:///path/to/extracted_frame2.jpg", "file:///path/to/extracted_frame3.jpg"],}, {"type": "text", "text": "Describe this video."},],}],
    ## The model dynamically adjusts the number of frames (nframes) and the video height/width; specify these arguments if required.
    [{"role": "user", "content": [{"type": "video", "video": "file:///path/to/video1.mp4", "fps": 2.0, "resized_height": 280, "resized_width": 280}, {"type": "text", "text": "Describe this video."}]}],
]

processor = AutoProcessor.from_pretrained(model_path)
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto", device_map="auto")
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
images, videos, video_kwargs = process_vision_info(messages, return_video_kwargs=True)
inputs = processor(text=text, images=images, videos=videos, padding=True, return_tensors="pt", **video_kwargs)
inputs = inputs.to(model.device)  # move tensors to the model's device before generating
print(inputs)
generated_ids = model.generate(**inputs)
print(generated_ids)
```
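
The `VIDEO_MAX_PIXELS` comment at the top of this example is shell-flavored pseudocode; a plain `export` cannot evaluate the arithmetic. Below is a minimal sketch of setting the limit from Python instead. It assumes the library reads the variable from the process environment, possibly once at import time, so the variable is set before `qwen_vl_utils` is imported; the `32000 * 28 * 28 * 0.9` budget is the figure from the comment, not a tuned recommendation.

```python
import os

# Cap the total pixel budget for videos: max tokens x 28*28 patch area x 0.9 safety margin.
# Set this before importing qwen_vl_utils, in case the limit is read once at import time.
os.environ["VIDEO_MAX_PIXELS"] = str(int(32000 * 28 * 28 * 0.9))

from qwen_vl_utils import process_vision_info  # import only after the variable is set
```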
            
