crop-coach

- Version: 0.0.8 (PyPI)
- Summary: crop_coach: a crop growth simulation model turned into an OpenAI Gym environment
- Author: AgriEdge (Noureddine Ech-chouky)
- Keywords: python, reinforcement learning, gym environment, PCSE, crop management, precision agriculture, WOFOST
- Uploaded: 2023-02-11 11:59:48

# CropCoach



Developed by Noureddine Ech-chouky, AgriEdge © 2022



## Initializing CropCoach with default parameters
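
CropCoach is distributed on PyPI; if it is not already installed, `pip install crop-coach` should fetch it.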



```python
import gym
import CropCoach  # registers the CropCoach-v0 environment with gym

# -- Initializing the environment with the default parameters
env = gym.make("CropCoach-v0")
```



## Examples of How To Use (Alpha Version)



Initializing the environment:



```python
import gym
import CropCoach

# -- Initializing the environment with the default parameters
env = gym.make("CropCoach-v0")

# -- Sample from the observation space
print(env.observation_space.sample())

# -- Sample from the action space
print(env.action_space.sample())
```
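
Beyond drawing samples, the space objects themselves can be printed to see their types and bounds (the exact shapes depend on the installed version):

```python
import gym
import CropCoach

env = gym.make("CropCoach-v0")

# Inspect the space definitions (types and bounds), not just sampled values
print(env.observation_space)
print(env.action_space)
```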



A simple RL loop:



```python
import gym
import CropCoach

import shutup

shutup.please()  # silence library warnings

# -- Define the env
env = gym.make("CropCoach-v0")

# -- Test the env: the rewarding mechanism
episodes = 10
for episode in range(1, episodes + 1):
    state = env.reset()
    done = False
    score = 0

    while not done:
        # env.render()
        action = env.action_space.sample()
        # action = np.array([1000])
        print(f"----------- action : {action} -------------\n")

        n_state, reward, done, info = env.step(action)
        print(f"----------- done : {done} -------------\n")
        print(f"----------- state : {n_state} -------------\n")
        print(f"----------- reward : {reward} -------------\n")
        score += reward

    print(f"Episode --:{episode} Score --:{score}")
```
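
For a quick sanity check, the loop above can be condensed into a random-policy baseline. `random_baseline` is a hypothetical helper for illustration, not part of the package; it reuses the `env` created above (a minimal sketch):

```python
def random_baseline(env, n_episodes=10):
    """Average episode score of a uniformly random policy (hypothetical helper)."""
    scores = []
    for _ in range(n_episodes):
        state = env.reset()
        done = False
        score = 0.0
        while not done:
            n_state, reward, done, info = env.step(env.action_space.sample())
            score += reward
        scores.append(score)
    return sum(scores) / len(scores)

print(f"Random-policy baseline: {random_baseline(env)}")
```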



Training agents with Stable-Baselines3: you need to install Stable-Baselines3 and configure the Weights & Biases (wandb) platform; see [my wofost project](https://github.com/nour3467/Rl-wofost-Agriculture-Recomender).



```python
# -- Importing dependencies
import os
from datetime import datetime

import gym
import CropCoach

from stable_baselines3 import A2C, PPO

# -- Use Weights & Biases to track training and evaluation
import wandb
from wandb.integration.sb3 import WandbCallback


def create_list_dir(paths_list):
    """Create each directory in paths_list if it does not exist yet."""
    for path in paths_list:
        if not os.path.exists(path):
            os.makedirs(path)


# Timestamp used to tag the logs and models folders
now = datetime.now().strftime("%a_%b_%Y_%I_%M_%p")

Test_Objective = "Default_PPO_vs_A2C_MultiDiscrete"

logs_dir = f"logs_{Test_Objective}_{now}"
models_dir = f"models_{Test_Objective}_{now}"
paths_list = [logs_dir, models_dir]

# Create the logs and models folders
create_list_dir(paths_list)

# -- Init the env
env = gym.make("CropCoach-v0")

# -- Init wandb configuration
config = {"policy_type": "MlpPolicy", "total_timesteps": 10000}

experiment_name = f"{Test_Objective}_{now}"

run = wandb.init(
    name=experiment_name,
    project="Paper_Experiments",
    config=config,
    sync_tensorboard=True,  # auto-upload sb3's tensorboard metrics
    save_code=True,  # optional
)

TIMESTEPS = config["total_timesteps"]

a2c_agent = A2C("MlpPolicy", env, tensorboard_log=logs_dir, verbose=1)
ppo_agent = PPO("MlpPolicy", env, tensorboard_log=logs_dir, verbose=1)

print("Training the A2C agent...")
a2c_agent.learn(
    total_timesteps=TIMESTEPS,
    tb_log_name="A2C",
    callback=WandbCallback(
        gradient_save_freq=2,
        log="all",
        verbose=1,
    ),
)
a2c_agent.save(f"{models_dir}/a2c_{TIMESTEPS}")
print("Done training the A2C agent")

print("Training the PPO agent...")
ppo_agent.learn(
    total_timesteps=TIMESTEPS,
    tb_log_name="PPO",
    callback=WandbCallback(
        gradient_save_freq=2,
        log="all",
        verbose=1,
    ),
)
ppo_agent.save(f"{models_dir}/ppo_{TIMESTEPS}")
print("Done training the PPO agent")

run.finish()

env.close()
```
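
The saved checkpoints can be reloaded later for evaluation with `PPO.load`. This is a minimal sketch: the path assumes the `models_dir` / `TIMESTEPS` naming from the script above, and `deterministic=True` makes the policy pick its most likely action instead of sampling:

```python
import gym
import CropCoach
from stable_baselines3 import PPO

env = gym.make("CropCoach-v0")
model = PPO.load(f"{models_dir}/ppo_{TIMESTEPS}")  # path from the training script above

obs = env.reset()
done = False
total = 0.0
while not done:
    action, _states = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
    total += reward
print(f"Evaluation score: {total}")
env.close()
```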



Changing the default parameters:



```python
import gym
import crop_coach

import shutup

shutup.please()  # silence library warnings

# Paths to the site, soil and crop parameter (.cab) files; adjust to your machine
crop_path = "C:/Users/noureddine/Desktop/default_data/crop/crop.cab"
site_path = "C:/Users/noureddine/Desktop/default_data/site/site.cab"
soil_path = "C:/Users/noureddine/Desktop/default_data/soil/soil.cab"

args = {
    "files_paths": {
        "site": site_path,
        "soil": soil_path,
        "crop": crop_path,
    },
    "sample_year": True,
    # Agromanagement settings; the date strings omit the year
    "Agromanager_dict": {
        "crop_name": "wheat",
        "crop_variety": "Winter_wheat_101",
        "campaign_start_date": "-01-01",
        "crop_start_type": "emergence",
        "emergence_date": "-03-31",
        "crop_end_type": "harvest",
        "harvest_date": "-08-11",
        "max_duration": 300,
    },
    # Input costs and selling price
    "Costs_dict": {"Irrigation": 150, "N": 8, "P": 8.5, "K": 7, "Selling": 2.5},
    "Discount_factors_dict": {"Irrigation": 1, "N": 1, "P": 1, "K": 1},
    "year": 2019,
    "years_count": 1,
}

# -- Define the env
env = gym.make("CropCoach-v0", **args)

# -- Test the env: the rewarding mechanism
episodes = 10
for episode in range(1, episodes + 1):
    state = env.reset()
    done = False
    score = 0

    while not done:
        env.render()
        action = env.action_space.sample()
        # action = np.array([1000])
        print(f"----------- action : {action} -------------\n")

        n_state, reward, done, info = env.step(action)
        print(f"----------- done : {done} -------------\n")
        print(f"----------- state : {n_state} -------------\n")
        print(f"----------- reward : {reward} -------------\n")
        score += reward

    print(f"Episode --:{episode} Score --:{score}")
```
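
Note that the date strings in `Agromanager_dict` omit the year ("-01-01", "-03-31", "-08-11"); presumably the `year` entry (2019 above) supplies it when the campaign calendar is built, with `sample_year` and `years_count` controlling how simulation years are drawn across episodes.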

            
