# MEMORY FOR GGUF_LLAMA AND OTHER LLAMAs
## Raw data
{
    "_id": null,
    "home_page": "https://github.com/laelhalawani/llama_memory",
    "name": "llama-memory",
    "maintainer": "",
    "docs_url": null,
    "requires_python": "",
    "maintainer_email": "",
    "keywords": "vector database,vector db,rag,long term ai memory,llama,ai,artificial intelligence,natural language processing,nlp,quantization,cpu,deployment,inference,model,models,model database,model repo,model repository,model library,model libraries,gguf,llm cpu,llm",
    "author": "\u0141ael Al-Halawani",
    "author_email": "laelhalawani@gmail.com",
    "download_url": "https://files.pythonhosted.org/packages/8e/bf/76fa0e9a2dd5e300c05765931539271b824ae5d3aa26c4fb91475bf70ed8/llama_memory-0.0.1a1.tar.gz",
    "platform": null,
    "description": "# MEMORY FOR GGUF_LLAMA AND OTHER LLAMAs\n",
    "bugtrack_url": null,
    "license": "",
    "summary": "Easy deployment of quantized llama models on cpu",
    "version": "0.0.1a1",
    "project_urls": {
        "Homepage": "https://github.com/laelhalawani/llama_memory"
    },
    "split_keywords": [
        "vector database",
        "vector db",
        "rag",
        "long term ai memory",
        "llama",
        "ai",
        "artificial intelligence",
        "natural language processing",
        "nlp",
        "quantization",
        "cpu",
        "deployment",
        "inference",
        "model",
        "models",
        "model database",
        "model repo",
        "model repository",
        "model library",
        "model libraries",
        "gguf",
        "llm cpu",
        "llm"
    ],
    "urls": [
        {
            "comment_text": "",
            "digests": {
                "blake2b_256": "081d8ad8fa40ed5d114fc098917c04f4a795f42fa7337cff9d45eaaffaf0f6b0",
                "md5": "2afa692069ccce0fff6c78e6481eb9f9",
                "sha256": "ec3c60a88c86f97be25e2ed66f9c4369716962a7bee59c49d1537895bc8cf8bc"
            },
            "downloads": -1,
            "filename": "llama_memory-0.0.1a1-py3-none-any.whl",
            "has_sig": false,
            "md5_digest": "2afa692069ccce0fff6c78e6481eb9f9",
            "packagetype": "bdist_wheel",
            "python_version": "py3",
            "requires_python": null,
            "size": 1509,
            "upload_time": "2024-01-09T01:59:53",
            "upload_time_iso_8601": "2024-01-09T01:59:53.521279Z",
            "url": "https://files.pythonhosted.org/packages/08/1d/8ad8fa40ed5d114fc098917c04f4a795f42fa7337cff9d45eaaffaf0f6b0/llama_memory-0.0.1a1-py3-none-any.whl",
            "yanked": false,
            "yanked_reason": null
        },
        {
            "comment_text": "",
            "digests": {
                "blake2b_256": "8ebf76fa0e9a2dd5e300c05765931539271b824ae5d3aa26c4fb91475bf70ed8",
                "md5": "07b55d5da77c083a1c5f97e8912aad02",
                "sha256": "95e897a1f7888c41defd7a4f0bb5c0fa14a685143e9a17ffd805aed2214f5946"
            },
            "downloads": -1,
            "filename": "llama_memory-0.0.1a1.tar.gz",
            "has_sig": false,
            "md5_digest": "07b55d5da77c083a1c5f97e8912aad02",
            "packagetype": "sdist",
            "python_version": "source",
            "requires_python": null,
            "size": 1604,
            "upload_time": "2024-01-09T01:59:55",
            "upload_time_iso_8601": "2024-01-09T01:59:55.199534Z",
            "url": "https://files.pythonhosted.org/packages/8e/bf/76fa0e9a2dd5e300c05765931539271b824ae5d3aa26c4fb91475bf70ed8/llama_memory-0.0.1a1.tar.gz",
            "yanked": false,
            "yanked_reason": null
        }
    ],
    "upload_time": "2024-01-09 01:59:55",
    "github": true,
    "gitlab": false,
    "bitbucket": false,
    "codeberg": false,
    "github_user": "laelhalawani",
    "github_project": "llama_memory",
    "travis_ci": false,
    "coveralls": false,
    "github_actions": true,
    "requirements": [],
    "lcname": "llama-memory"
}
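The `urls` entries above pair each release artifact with its published digests. As a minimal sketch (not part of this package), the following Python snippet fetches the same core metadata from PyPI's public JSON API and checks each downloaded file against its `sha256` and `blake2b_256` values. The endpoint and the `urls`/`digests` field names follow PyPI's standard response shape, which this record mirrors (the extra `github`/`lcname` fields look aggregator-specific and are not used here); `PACKAGE` is just the `name` from the record, and the BLAKE2b parameters are a stated assumption.

```python
# Minimal sketch: fetch release metadata from PyPI's public JSON API and
# verify the downloaded artifacts against the digests published above.
# Assumptions: network access, Python 3 standard library only, and that
# the record above mirrors the standard PyPI JSON response shape.
import hashlib
import json
import urllib.request

PACKAGE = "llama-memory"  # the "name" field from the record above

# PyPI exposes the same core metadata ("urls", "digests", ...) here.
with urllib.request.urlopen(f"https://pypi.org/pypi/{PACKAGE}/json") as resp:
    meta = json.load(resp)

for file_info in meta["urls"]:
    # Both release files are under 2 KB ("size": 1509 and 1604),
    # so downloading them for verification is cheap.
    with urllib.request.urlopen(file_info["url"]) as resp:
        data = resp.read()

    sha256 = hashlib.sha256(data).hexdigest()
    # Assumption: "blake2b_256" is BLAKE2b with a 32-byte digest.
    blake2b_256 = hashlib.blake2b(data, digest_size=32).hexdigest()

    ok = (sha256 == file_info["digests"]["sha256"]
          and blake2b_256 == file_info["digests"]["blake2b_256"])
    print(f"{file_info['filename']}: {'OK' if ok else 'MISMATCH'}")
```

Note how the `blake2b_256` value also shows up in the download URLs above: the wheel's digest `081d8ad8fa40...` maps to the path segments `/packages/08/1d/8ad8fa40.../`, so the hash doubles as the file's address on files.pythonhosted.org.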