| Field | Value |
| --- | --- |
| Name | echolib |
| Version | 0.2.0 |
| home_page | https://github.com/ayoubachak/echolib |
| Summary | An AI-driven library for generating content using HuggingFace and LMStudio models. |
| upload_time | 2024-12-07 14:57:38 |
| maintainer | None |
| docs_url | None |
| author | Ayoub Achak |
| requires_python | >=3.8 |
| license | None |
| keywords | None |
| VCS | GitHub |
| bugtrack_url | None |
| requirements | No requirements were recorded. |
| Travis-CI | No Travis. |
| coveralls test coverage | No coveralls. |
# Run
Make sure you have the following three files:
- `hf_models.json`
```json
[
{
"id" : 1,
"name" : "Mistral v0.2 ( HuggingFace )",
"type" : "HUGGINGFACE",
"kwargs" : {
"api_url": "https://api-inference.huggingface.co/models",
"headers": {
"Authorization": "Bearer YOUR_API_KEY",
"Content-Type": "application/json"
},
"model_huggingface_id": "mistralai/Mistral-7B-Instruct-v0.2",
"default_parameters": {
"max_length": -1,
"max_new_tokens": 250,
"temperature": 1e-8,
"use_cache": true,
"wait_for_model": true
}
},
"preset" : 1
},
{
"id" : 2,
"name" : "Mistral v0.3 ( HuggingFace )",
"type" : "HUGGINGFACE",
"kwargs" : {
"api_url": "https://api-inference.huggingface.co/models",
"headers": {
"Authorization": "Bearer YOUR_API_KEY",
"Content-Type": "application/json"
},
"model_huggingface_id": "mistralai/Mistral-7B-Instruct-v0.3",
"default_parameters": {
"max_length": -1,
"max_new_tokens": 250,
"temperature": 1e-8,
"use_cache": true,
"wait_for_model": true
}
},
"preset" : 1
},
{
"id" : 3,
"name" : "Phi 3 mini ( HuggingFace )",
"type" : "HUGGINGFACE",
"kwargs" : {
"api_url": "https://api-inference.huggingface.co/models",
"headers": {
"Authorization": "Bearer YOUR_API_KEY",
"Content-Type": "application/json"
},
"model_huggingface_id": "microsoft/Phi-3-mini-4k-instruct",
"default_parameters": {
"max_length": -1,
"max_new_tokens": 250,
"temperature": 1e-8,
"use_cache": true,
"wait_for_model": true
}
},
"preset" : 2
}
]
```
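
echolib's request layer isn't shown in this README, but each `kwargs` block maps onto the public HuggingFace Inference API. Here is a minimal sketch of a raw call using the Mistral v0.2 entry above (plain `requests`, no echolib); one assumption: the raw API expects `use_cache`/`wait_for_model` under its `options` field rather than `parameters`, so the sketch splits them out of `default_parameters`.

```python
import requests

# One entry from hf_models.json (trimmed to the fields used here).
MODEL = {
    "api_url": "https://api-inference.huggingface.co/models",
    "model_huggingface_id": "mistralai/Mistral-7B-Instruct-v0.2",
    "headers": {
        "Authorization": "Bearer YOUR_API_KEY",
        "Content-Type": "application/json",
    },
    "default_parameters": {
        "max_new_tokens": 250,
        "temperature": 1e-8,   # near-zero, i.e. effectively greedy decoding
        "use_cache": True,
        "wait_for_model": True,
    },
}

def generate(prompt: str) -> str:
    params = dict(MODEL["default_parameters"])
    # The raw Inference API takes use_cache/wait_for_model under "options",
    # not "parameters"; the config file groups everything together.
    options = {k: params.pop(k) for k in ("use_cache", "wait_for_model")}
    resp = requests.post(
        f"{MODEL['api_url']}/{MODEL['model_huggingface_id']}",
        headers=MODEL["headers"],
        json={"inputs": prompt, "parameters": params, "options": options},
        timeout=120,
    )
    resp.raise_for_status()
    return resp.json()[0]["generated_text"]

print(generate("[INST] Say hello. [/INST]"))
```
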
- `presets.json`
```json
[
{
"id" : 1,
"name" : "Mistral Instruct",
"input_prefix" : "[INST]",
"input_suffix" : "[/INST]",
"antiprompt" : "[INST]",
"pre_prompt" : "",
"pre_prompt_prefix" : "",
"pre_prompt_suffix" : ""
},
{
"id" : 2,
"name" : "Phi 3",
"input_prefix" : "<|user|>\n",
"input_suffix" : "<|end|>\n<|assistant|>\n",
"antiprompt" : "\"<|end|>\", \"<|assistant|>\"",
"pre_prompt" : "",
"pre_prompt_prefix" : "<|end|>\n",
"pre_prompt_suffix" : "<|system|>\n"
}
]
```
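
These fields follow the shape of a standard chat template: `pre_prompt` is a system message wrapped by `pre_prompt_suffix`/`pre_prompt_prefix`, each user turn is wrapped by `input_prefix`/`input_suffix`, and `antiprompt` is the stop sequence. A sketch under that assumed reading (`build_prompt` is a hypothetical helper, not echolib's API):

```python
def build_prompt(preset: dict, user_input: str, system: str = "") -> str:
    """Guess at the preset field semantics; not echolib's actual code."""
    system_msg = system or preset["pre_prompt"]
    parts = []
    if system_msg:
        # Judging by the Phi 3 entry, pre_prompt_suffix opens the system
        # block ("<|system|>\n") and pre_prompt_prefix closes it ("<|end|>\n").
        parts.append(preset["pre_prompt_suffix"] + system_msg + preset["pre_prompt_prefix"])
    parts.append(preset["input_prefix"] + user_input + preset["input_suffix"])
    return "".join(parts)

mistral = {
    "input_prefix": "[INST]", "input_suffix": "[/INST]",
    "pre_prompt": "", "pre_prompt_prefix": "", "pre_prompt_suffix": "",
}
print(build_prompt(mistral, "Say hello."))
# -> [INST]Say hello.[/INST]
# The preset's "antiprompt" would be passed as a stop sequence, so
# generation halts when the model starts a new "[INST]" turn.
```
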
- `tokens.json`: more tokens mean greater reliability and fault tolerance; in my case I used 10 HuggingFace tokens.
```json
[
{
"id": 1,
"name": "First",
"value": "A HuggingFace api token"
},
{
"id": 2,
"name": "Second",
"value": "Another token just to make sure the requests don't fail"
},
{
"id": 3,
"name": "Third",
"value": "a third token as a backup"
}
]
```
Add more entries as needed.
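
The free Inference API rate-limits each token, so a client that rotates to the next token on a 429/503 keeps working where a single token would stall. How echolib actually schedules its tokens isn't documented here; this is one plausible failover loop (`generate_with_failover` is illustrative, not part of echolib):

```python
import itertools
import json

import requests

# Cycle through the tokens defined in tokens.json above.
with open("tokens.json") as f:
    TOKENS = itertools.cycle(json.load(f))

API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"

def generate_with_failover(prompt: str, attempts: int = 5) -> str:
    """Try up to `attempts` tokens, rotating past rate limits and cold models."""
    last_status = None
    for _ in range(attempts):
        token = next(TOKENS)
        resp = requests.post(
            API_URL,
            headers={"Authorization": f"Bearer {token['value']}"},
            json={"inputs": prompt, "options": {"wait_for_model": True}},
            timeout=120,
        )
        if resp.status_code in (429, 503):  # rate-limited or model loading
            last_status = resp.status_code
            continue
        resp.raise_for_status()
        return resp.json()[0]["generated_text"]
    raise RuntimeError(f"all {attempts} attempts failed (last status {last_status})")
```
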
# Raw data
```json
{
"_id": null,
"home_page": "https://github.com/ayoubachak/echolib",
"name": "echolib",
"maintainer": null,
"docs_url": null,
"requires_python": ">=3.8",
"maintainer_email": null,
"keywords": null,
"author": "Ayoub Achak",
"author_email": "ayoub.achak01@example.com",
"download_url": "https://files.pythonhosted.org/packages/46/a8/11fdfa11f5cb96d1bc66676ca057c43ad819fc2aaf9f1794e550c993e942/echolib-0.2.0.tar.gz",
"platform": null,
"description": "# Run\n\nmake sure to have the following 3 files :\n- `hf_models.json` \n```json\n[\n {\n \"id\" : 1,\n \"name\" : \"Mistral v0.2 ( HuggingFace )\",\n \"type\" : \"HUGGINGFACE\",\n \"kwargs\" : {\n \"api_url\": \"https://api-inference.huggingface.co/models\",\n \"headers\": {\n \"Authorization\": \"Bearer YOUR_API_KEY\",\n \"Content-Type\": \"application/json\"\n },\n \"model_huggingface_id\": \"mistralai/Mistral-7B-Instruct-v0.2\",\n \"default_parameters\": {\n \"max_length\": -1,\n \"max_new_tokens\": 250,\n \"temperature\": 1e-8,\n \"use_cache\": true,\n \"wait_for_model\": true\n }\n },\n \"preset\" : 1\n },\n {\n \"id\" : 2,\n \"name\" : \"Mistral v0.3 ( HuggingFace )\",\n \"type\" : \"HUGGINGFACE\",\n \"kwargs\" : {\n \"api_url\": \"https://api-inference.huggingface.co/models\",\n \"headers\": {\n \"Authorization\": \"Bearer YOUR_API_KEY\",\n \"Content-Type\": \"application/json\"\n },\n \"model_huggingface_id\": \"mistralai/Mistral-7B-Instruct-v0.3\",\n \"default_parameters\": {\n \"max_length\": -1,\n \"max_new_tokens\": 250,\n \"temperature\": 1e-8,\n \"use_cache\": true,\n \"wait_for_model\": true\n }\n },\n \"preset\" : 1\n },\n {\n \"id\" : 3,\n \"name\" : \"Phi 3 mini ( HuggingFace )\",\n \"type\" : \"HUGGINGFACE\",\n \"kwargs\" : {\n \"api_url\": \"https://api-inference.huggingface.co/models\",\n \"headers\": {\n \"Authorization\": \"Bearer YOUR_API_KEY\",\n \"Content-Type\": \"application/json\"\n },\n \"model_huggingface_id\": \"microsoft/Phi-3-mini-4k-instruct\",\n \"default_parameters\": {\n \"max_length\": -1,\n \"max_new_tokens\": 250,\n \"temperature\": 1e-8,\n \"use_cache\": true,\n \"wait_for_model\": true\n }\n },\n \"preset\" : 2\n }\n]\n```\n- `presets.json`\n```json\n[\n {\n \"id\" : 1,\n \"name\" : \"Mistral Instruct\",\n \"input_prefix\" : \"[INST]\",\n \"input_suffix\" : \"[/INST]\",\n \"antiprompt\" : \"[INST]\",\n \"pre_prompt\" : \"\",\n \"pre_prompt_prefix\" : \"\",\n \"pre_prompt_suffix\" : \"\"\n },\n {\n \"id\" : 2,\n \"name\" : \"Phi 3\",\n \"input_prefix\" : \"<|user|>\\n\",\n \"input_suffix\" : \"<|end|>\\n<|assistant|>\\n\",\n \"antiprompt\" : \"\\\"<|end|>\\\", \\\"<|assistant|>\\\"\",\n \"pre_prompt\" : \"\",\n \"pre_prompt_prefix\" : \"<|end|>\\n\",\n \"pre_prompt_suffix\" : \"<|system|>\\n\"\n }\n]\n```\n- `tokens.json` : More tokens mean more reliability and fault tolerence, in my case I used 10 HuggingFace tokens.\n```json\n[\n {\n \"id\": 1,\n \"name\": \"First\",\n \"value\": \"A HuggingFace api token\"\n },\n {\n \"id\": 2,\n \"name\": \"Second\",\n \"value\": \"Another token just to make sure the requests don't fail\"\n },\n {\n \"id\": 3,\n \"name\": \"Third\",\n \"value\": \"a third token as a backup\"\n }\n ... Add more as needed\n]\n```\n\n",
"bugtrack_url": null,
"license": null,
"summary": "An AI-driven library for generating content using HuggingFace and LMStudio models.",
"version": "0.2.0",
"project_urls": {
"Homepage": "https://github.com/ayoubachak/echolib"
},
"split_keywords": [],
"urls": [
{
"comment_text": "",
"digests": {
"blake2b_256": "e71e96f55c388be41b86f61b343a36309244da2e505f0a6a5490dd1f8a154624",
"md5": "21a6fc0c063f6bf85c2f5ee3cf3db495",
"sha256": "e937428b2769adeb59e7cc37b243d7ae43614af14534d525f9f6c17a373244e1"
},
"downloads": -1,
"filename": "echolib-0.2.0-py3-none-any.whl",
"has_sig": false,
"md5_digest": "21a6fc0c063f6bf85c2f5ee3cf3db495",
"packagetype": "bdist_wheel",
"python_version": "py3",
"requires_python": ">=3.8",
"size": 16127,
"upload_time": "2024-12-07T14:57:36",
"upload_time_iso_8601": "2024-12-07T14:57:36.910338Z",
"url": "https://files.pythonhosted.org/packages/e7/1e/96f55c388be41b86f61b343a36309244da2e505f0a6a5490dd1f8a154624/echolib-0.2.0-py3-none-any.whl",
"yanked": false,
"yanked_reason": null
},
{
"comment_text": "",
"digests": {
"blake2b_256": "46a811fdfa11f5cb96d1bc66676ca057c43ad819fc2aaf9f1794e550c993e942",
"md5": "04cce3dc639ae666b083f37b5c29d5e8",
"sha256": "33bc4b2db68cd6670b55ce649fc7d7baafca39be9f31041e8bfeca119af2181b"
},
"downloads": -1,
"filename": "echolib-0.2.0.tar.gz",
"has_sig": false,
"md5_digest": "04cce3dc639ae666b083f37b5c29d5e8",
"packagetype": "sdist",
"python_version": "source",
"requires_python": ">=3.8",
"size": 13109,
"upload_time": "2024-12-07T14:57:38",
"upload_time_iso_8601": "2024-12-07T14:57:38.589645Z",
"url": "https://files.pythonhosted.org/packages/46/a8/11fdfa11f5cb96d1bc66676ca057c43ad819fc2aaf9f1794e550c993e942/echolib-0.2.0.tar.gz",
"yanked": false,
"yanked_reason": null
}
],
"upload_time": "2024-12-07 14:57:38",
"github": true,
"gitlab": false,
"bitbucket": false,
"codeberg": false,
"github_user": "ayoubachak",
"github_project": "echolib",
"travis_ci": false,
"coveralls": false,
"github_actions": true,
"lcname": "echolib"
}
```