# Ninetails
A wrapper for creating vectorized gymnasium environments.
## Installation
`pip3 install ninetails`
## Usage
```py
import gymnasium as gym
import numpy as np
from ninetails import SubProcessVectorGymnasiumEnv
def main() -> None:
    """Run a short random-action rollout on a vectorized environment.

    Demonstrates creating a ``SubProcessVectorGymnasiumEnv``, stepping it with
    sampled actions, and resetting individual sub-environments as they finish.

    Returns:
        None:
    """
    # define your environment using a function that returns the environment here
    # (``i=i`` binds the loop variable as a default arg, avoiding the
    # late-binding closure pitfall when num_envs > 1)
    env_fns = [lambda i=i: gym.make("MountainCarContinuous-v0") for i in range(1)]

    # create a vectorized environment
    # `strict` is useful here for debugging
    vec_env = SubProcessVectorGymnasiumEnv(env_fns=env_fns, strict=True)

    # define our initial termination and truncation arrays
    terminations, truncations = np.array([False]), np.array([False])

    # reset follows the same signature as a Gymnasium environment
    observations, infos = vec_env.reset(seed=42)

    for step_count in range(5000):
        # sample an action, this is an np.ndarray of [num_envs, *env.action_space.shape]
        actions = vec_env.sample_actions()

        # similarly, the step function follows the same signature as a Gymnasium
        # environment with the following shapes
        # observations: np.ndarray of shape [num_envs, *env.observation_space.shape]
        # rewards: np.ndarray of shape [num_envs, 1]
        # terminations: np.ndarray of shape [num_envs, 1]
        # truncations: np.ndarray of shape [num_envs, 1]
        # infos: tuple[dict[str, Any]]
        observations, rewards, terminations, truncations, infos = vec_env.step(actions)

        # to reset underlying environments
        # NOTE: np.where returns a *tuple* of index arrays — take the first
        # axis ([0]) to get the environment ids before converting to a list
        done_ids = set(
            np.where(terminations)[0].tolist() + np.where(truncations)[0].tolist()
        )
        for env_id in done_ids:
            # warning, you'll have to handle starting observations yourself here
            reset_obs, reset_info = vec_env.reset(env_id)


if __name__ == "__main__":
    main()
```
Raw data
{
"_id": null,
"home_page": null,
"name": "ninetails",
"maintainer": null,
"docs_url": null,
"requires_python": ">=3.9",
"maintainer_email": null,
"keywords": "Reinforcement Learning, game, RL, AI, gymnasium",
"author": null,
"author_email": "Jet <taijunjet@hotmail.com>",
"download_url": "https://files.pythonhosted.org/packages/3a/07/9ed5a4e353c5551c7eb04ab4e0e8c146e9584ea45d1d3e9a5d1ef2379e69/ninetails-0.0.8.tar.gz",
"platform": null,
"description": "# Ninetails\n\nA wrapper for creating vectorized gymnasium environments.\n\n## Installation\n\n`pip3 install ninetails`\n\n## Usage\n\n```py\nimport gymnasium as gym\nimport numpy as np\n\nfrom ninetails import SubProcessVectorGymnasiumEnv\n\n\ndef main() -> None:\n \"\"\"main.\n\n Returns:\n None:\n \"\"\"\n # define your environment using a function that returns the environment here\n env_fns = [lambda i=i: gym.make(\"MountainCarContinuous-v0\") for i in range(1)]\n\n # create a vectorized environment\n # `strict` is useful here for debugging\n vec_env = SubProcessVectorGymnasiumEnv(env_fns=env_fns, strict=True)\n\n # define our initial termination and trunction arrays\n terminations, truncations = np.array([False]), np.array([False])\n\n # reset follows the same signature as a Gymnasium environment\n observations, infos = vec_env.reset(seed=42)\n\n for step_count in range(5000):\n # sample an action, this is an np.ndarray of [num_envs, *env.action_space.shape]\n actions = vec_env.sample_actions()\n\n # similarly, the step function follows the same signature as a Gymnasium environment with the following shapes\n # observations: np.ndarray of shape [num_envs, *env.observation_space.shape]\n # rewards: np.ndarray of shape [num_envs, 1]\n # terminations: np.ndarray of shape [num_envs, 1]\n # truncations: np.ndarray of shape [num_envs, 1]\n # infos: tuple[dict[str, Any]]\n observations, rewards, terminations, truncations, infos = vec_env.step(actions)\n\n # to reset underlying environments\n done_ids = set(np.where(terminations).tolist() + np.where(truncations).tolist())\n for id in done_ids:\n # warning, you'll have to handle starting observations yourself here\n reset_obs, reset_info = vec_env.reset(id)\n\n\nif __name__ == \"__main__\":\n main()\n```\n",
"bugtrack_url": null,
"license": "MIT License",
"summary": "Wrapper for creating vectorized gymnasium environments.",
"version": "0.0.8",
"project_urls": null,
"split_keywords": [
"reinforcement learning",
" game",
" rl",
" ai",
" gymnasium"
],
"urls": [
{
"comment_text": "",
"digests": {
"blake2b_256": "86e060e6e53c2602658ab87b8ec4486f98b3b588857a913c9a0c55b46271f71a",
"md5": "8b146438258d8e82ae468fd3b3584e19",
"sha256": "a6e974958f176a89f6b5747c342f65aad335326e77d5733a4f29c429296662f9"
},
"downloads": -1,
"filename": "ninetails-0.0.8-py3-none-any.whl",
"has_sig": false,
"md5_digest": "8b146438258d8e82ae468fd3b3584e19",
"packagetype": "bdist_wheel",
"python_version": "py3",
"requires_python": ">=3.9",
"size": 8975,
"upload_time": "2024-06-25T09:22:01",
"upload_time_iso_8601": "2024-06-25T09:22:01.814583Z",
"url": "https://files.pythonhosted.org/packages/86/e0/60e6e53c2602658ab87b8ec4486f98b3b588857a913c9a0c55b46271f71a/ninetails-0.0.8-py3-none-any.whl",
"yanked": false,
"yanked_reason": null
},
{
"comment_text": "",
"digests": {
"blake2b_256": "3a079ed5a4e353c5551c7eb04ab4e0e8c146e9584ea45d1d3e9a5d1ef2379e69",
"md5": "7241851c115f9ef4b08fb5fb3ab0a4b0",
"sha256": "92e8705ab21c16c6e5e40f78ebfcc25d3bcc6c7f62f11f4c40aaf9941f4dba92"
},
"downloads": -1,
"filename": "ninetails-0.0.8.tar.gz",
"has_sig": false,
"md5_digest": "7241851c115f9ef4b08fb5fb3ab0a4b0",
"packagetype": "sdist",
"python_version": "source",
"requires_python": ">=3.9",
"size": 8048,
"upload_time": "2024-06-25T09:22:03",
"upload_time_iso_8601": "2024-06-25T09:22:03.913077Z",
"url": "https://files.pythonhosted.org/packages/3a/07/9ed5a4e353c5551c7eb04ab4e0e8c146e9584ea45d1d3e9a5d1ef2379e69/ninetails-0.0.8.tar.gz",
"yanked": false,
"yanked_reason": null
}
],
"upload_time": "2024-06-25 09:22:03",
"github": false,
"gitlab": false,
"bitbucket": false,
"codeberg": false,
"lcname": "ninetails"
}