# ControlNet auxiliary models
This is a PyPI-installable package of [lllyasviel's ControlNet Annotators](https://github.com/lllyasviel/ControlNet/tree/main/annotator).
The code is copy-pasted from the respective folders in https://github.com/lllyasviel/ControlNet/tree/main/annotator and connected to [the 🤗 Hub](https://huggingface.co/lllyasviel/Annotators).
All credit & copyright go to https://github.com/lllyasviel.
## Install
```
pip install controlnet-aux-voltaml==0.3.2
```
## Usage
```python
from PIL import Image
import requests
from io import BytesIO
from controlnet_aux import HEDdetector, MidasDetector, MLSDdetector, OpenposeDetector, PidiNetDetector, NormalBaeDetector, LineartDetector, LineartAnimeDetector, CannyDetector, ContentShuffleDetector, ZoeDetector, MediapipeFaceDetector
# load image
url = "https://huggingface.co/lllyasviel/sd-controlnet-openpose/resolve/main/images/pose.png"
response = requests.get(url)
img = Image.open(BytesIO(response.content)).convert("RGB").resize((512, 512))
# load checkpoints
hed = HEDdetector.from_pretrained("lllyasviel/Annotators")
midas = MidasDetector.from_pretrained("lllyasviel/Annotators")
mlsd = MLSDdetector.from_pretrained("lllyasviel/Annotators")
open_pose = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
pidi = PidiNetDetector.from_pretrained("lllyasviel/Annotators")
normal_bae = NormalBaeDetector.from_pretrained("lllyasviel/Annotators")
lineart = LineartDetector.from_pretrained("lllyasviel/Annotators")
lineart_anime = LineartAnimeDetector.from_pretrained("lllyasviel/Annotators")
zoe = ZoeDetector.from_pretrained("lllyasviel/Annotators")
# instantiate
canny = CannyDetector()
content = ContentShuffleDetector()
face_detector = MediapipeFaceDetector()
# process
processed_image_hed = hed(img)
processed_image_midas = midas(img)
processed_image_mlsd = mlsd(img)
processed_image_open_pose = open_pose(img, hand_and_face=True)
processed_image_pidi = pidi(img, safe=True)
processed_image_normal_bae = normal_bae(img)
processed_image_lineart = lineart(img, coarse=True)
processed_image_lineart_anime = lineart_anime(img)
processed_image_zoe = zoe(img)
processed_image_canny = canny(img)
processed_image_content = content(img)
processed_image_mediapipe_face = face_detector(img)
```
Raw data
{
"_id": null,
"home_page": "https://github.com/patrickvonplaten/controlnet_aux",
"name": "controlnet-aux-voltaml",
"maintainer": "",
"docs_url": null,
"requires_python": ">=3.7.0",
"maintainer_email": "",
"keywords": "deep learning",
"author": "The HuggingFace team",
"author_email": "patrick@huggingface.co",
"download_url": "https://files.pythonhosted.org/packages/f5/fe/427d7d2ce1ca97f1c4d6c9b54a6fc070d97fb34c769cba8269e661a375ed/controlnet_aux_voltaml-0.3.2.tar.gz",
"platform": null,
"description": "# ControlNet auxiliary models\n\nThis is a PyPi installable package of [lllyasviel's ControlNet Annotators](https://github.com/lllyasviel/ControlNet/tree/main/annotator)\n\nThe code is copy-pasted from the respective folders in https://github.com/lllyasviel/ControlNet/tree/main/annotator and connected to [the \ud83e\udd17 Hub](https://huggingface.co/lllyasviel/Annotators).\n\nAll credit & copyright goes to https://github.com/lllyasviel .\n\n## Install\n\n```\npip install controlnet-aux==0.0.3\n```\n\n## Usage\n\n```python\nfrom PIL import Image\nimport requests\nfrom io import BytesIO\nfrom controlnet_aux import HEDdetector, MidasDetector, MLSDdetector, OpenposeDetector, PidiNetDetector, NormalBaeDetector, LineartDetector, LineartAnimeDetector, CannyDetector, ContentShuffleDetector, ZoeDetector, MediapipeFaceDetector\n\n# load image\nurl = \"https://huggingface.co/lllyasviel/sd-controlnet-openpose/resolve/main/images/pose.png\"\n\nresponse = requests.get(url)\nimg = Image.open(BytesIO(response.content)).convert(\"RGB\").resize((512, 512))\n\n# load checkpoints\nhed = HEDdetector.from_pretrained(\"lllyasviel/Annotators\")\nmidas = MidasDetector.from_pretrained(\"lllyasviel/Annotators\")\nmlsd = MLSDdetector.from_pretrained(\"lllyasviel/Annotators\")\nopen_pose = OpenposeDetector.from_pretrained(\"lllyasviel/Annotators\")\npidi = PidiNetDetector.from_pretrained(\"lllyasviel/Annotators\")\nnormal_bae = NormalBaeDetector.from_pretrained(\"lllyasviel/Annotators\")\nlineart = LineartDetector.from_pretrained(\"lllyasviel/Annotators\")\nlineart_anime = LineartAnimeDetector.from_pretrained(\"lllyasviel/Annotators\")\nzoe = ZoeDetector.from_pretrained(\"lllyasviel/Annotators\")\n\n# instantiate\ncanny = CannyDetector()\ncontent = ContentShuffleDetector()\nface_detector = MediapipeFaceDetector()\n\n\n# process\nprocessed_image_hed = hed(img)\nprocessed_image_midas = midas(img)\nprocessed_image_mlsd = mlsd(img)\nprocessed_image_open_pose = open_pose(img, 
hand_and_face=True)\nprocessed_image_pidi = pidi(img, safe=True)\nprocessed_image_normal_bae = normal_bae(img)\nprocessed_image_lineart = lineart(img, coarse=True)\nprocessed_image_lineart_anime = lineart_anime(img)\nprocessed_image_zoe = zoe(img)\n\nprocessed_image_canny = canny(img)\nprocessed_image_content = content(img)\nprocessed_image_mediapipe_face = face_detector(img)\n```\n",
"bugtrack_url": null,
"license": "Apache",
"summary": "Utilities for preprocessing images for controlnet",
"version": "0.3.2",
"project_urls": {
"Homepage": "https://github.com/patrickvonplaten/controlnet_aux"
},
"split_keywords": [
"deep",
"learning"
],
"urls": [
{
"comment_text": "",
"digests": {
"blake2b_256": "fc7c62597ce7e907292758b2ff8985fb154e4283607a031d4c95cc98e3d42e20",
"md5": "63d5434f54a153b1553b2a3e9b1a3ee2",
"sha256": "c382c2a3cc84a0f8ebde5a850df7c285b6a653d63847eec3f469d156abdaf582"
},
"downloads": -1,
"filename": "controlnet_aux_voltaml-0.3.2-py3-none-any.whl",
"has_sig": false,
"md5_digest": "63d5434f54a153b1553b2a3e9b1a3ee2",
"packagetype": "bdist_wheel",
"python_version": "py3",
"requires_python": ">=3.7.0",
"size": 192294,
"upload_time": "2023-05-08T09:20:16",
"upload_time_iso_8601": "2023-05-08T09:20:16.212014Z",
"url": "https://files.pythonhosted.org/packages/fc/7c/62597ce7e907292758b2ff8985fb154e4283607a031d4c95cc98e3d42e20/controlnet_aux_voltaml-0.3.2-py3-none-any.whl",
"yanked": false,
"yanked_reason": null
},
{
"comment_text": "",
"digests": {
"blake2b_256": "f5fe427d7d2ce1ca97f1c4d6c9b54a6fc070d97fb34c769cba8269e661a375ed",
"md5": "3ad4565103151a10c32ad1c62e31a9bb",
"sha256": "712f575b14042b7aa74e7b13ad04c4bc1de8bc9f4c0d02b52da4363026bba51b"
},
"downloads": -1,
"filename": "controlnet_aux_voltaml-0.3.2.tar.gz",
"has_sig": false,
"md5_digest": "3ad4565103151a10c32ad1c62e31a9bb",
"packagetype": "sdist",
"python_version": "source",
"requires_python": ">=3.7.0",
"size": 134115,
"upload_time": "2023-05-08T09:20:18",
"upload_time_iso_8601": "2023-05-08T09:20:18.506246Z",
"url": "https://files.pythonhosted.org/packages/f5/fe/427d7d2ce1ca97f1c4d6c9b54a6fc070d97fb34c769cba8269e661a375ed/controlnet_aux_voltaml-0.3.2.tar.gz",
"yanked": false,
"yanked_reason": null
}
],
"upload_time": "2023-05-08 09:20:18",
"github": true,
"gitlab": false,
"bitbucket": false,
"codeberg": false,
"github_user": "patrickvonplaten",
"github_project": "controlnet_aux",
"travis_ci": false,
"coveralls": false,
"github_actions": false,
"lcname": "controlnet-aux-voltaml"
}