gaussian-process

Name: gaussian-process
Version: 0.0.14
Home page: https://github.com/LucaCappelletti94/gaussian_process
Summary: Wrapper for sklearn.gp_minimize for a simpler parameter specification using nested dictionaries.
Upload time: 2020-02-15 22:44:55
Author: Luca Cappelletti
License: MIT
Requirements: No requirements were recorded.

gaussian_process
=========================================================================================
|travis| |sonar_quality| |sonar_maintainability| |codacy|
|code_climate_maintainability| |pip| |downloads|

Wrapper for ``gp_minimize`` (from scikit-optimize) for a simpler parameter specification using nested dictionaries.
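
In short, the top-level keys of the nested dictionary become the keyword
arguments of the objective function. A minimal sketch of the idea follows; the
toy objective, the key names and the interpretation of tuples as discrete
choices and two-element lists as ranges are illustrative assumptions inferred
from the Keras example further down, not documented guarantees.

.. code:: python

    from gaussian_process import GaussianProcess, Space

    # Toy objective: "model" is an illustrative top-level key whose nested
    # dictionary is passed back as a keyword argument. The sign convention
    # mirrors the Keras example below, which hands a negated loss to the optimizer.
    def score(model):
        return -(model["learning_rate"] - 0.01) ** 2

    space = Space({
        "model": {
            # Assumed conventions: a two-element list denotes a numeric range,
            # a tuple denotes a set of discrete choices.
            "learning_rate": [0.001, 0.1],
            "activation": ("relu", "selu"),  # present only to show a discrete entry
        }
    })

    gp = GaussianProcess(score, space)
    gp.minimize(n_calls=10, n_random_starts=3, random_state=42)
    print(gp.best_parameters)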

How do I install this package?
----------------------------------------------
As usual, just install it using pip:

.. code:: shell

    pip install gaussian_process

Tests Coverage
----------------------------------------------
Since different coverage tools sometimes report slightly different results, here are three of them:

|coveralls| |sonar_coverage| |code_climate_coverage|

Keras model optimization using a Gaussian process
-------------------------------------------------------------
The following example shows a complete use of ``GaussianProcess``
for tuning the parameters of a Keras model.

.. code:: python

    import silence_tensorflow
    from keras.models import Sequential
    from keras.layers import Dense, Dropout
    from keras.datasets import boston_housing
    from extra_keras_utils import set_seed
    from typing import Callable, Dict
    import numpy as np
    from holdouts_generator import holdouts_generator, random_holdouts
    from gaussian_process import TQDMGaussianProcess, Space, GaussianProcess


    class MLP:
        def __init__(self, holdouts: Callable):
            self._holdouts = holdouts

        def mlp(self, dense_layers: list, dropout_rate: float) -> Sequential:
            # Build a small MLP: the tuned dense layers, a dropout layer and a
            # single-unit output for the regression target.
            return Sequential([
                *[Dense(**kwargs) for kwargs in dense_layers],
                Dropout(dropout_rate),
                Dense(1, activation="relu"),
            ])

        def model_score(self, train: tuple, test: tuple, structure: Dict, fit: Dict) -> float:
            # Train a fresh model on the holdout's training split and return
            # the final validation loss on its test split.
            model = self.mlp(**structure)
            model.compile(
                optimizer="nadam",
                loss="mse"
            )

            return model.fit(
                *train,
                epochs=1,
                validation_data=test,
                verbose=0,
                **fit
            ).history["val_loss"][-1]

        def score(self, structure: Dict, fit: Dict) -> float:
            # Evaluate the candidate configuration on every generated holdout;
            # the negated mean validation loss is the value handed to the optimizer.
            return -np.mean([
                self.model_score(training, test, structure, fit)
                for (training, test), _ in self._holdouts()
            ])


    if __name__ == "__main__":
        set_seed(42)

        # Generate random holdouts from the Boston housing training data.
        generator = holdouts_generator(
            *boston_housing.load_data()[0],
            holdouts=random_holdouts([0.1], [2])
        )

        mlp = MLP(generator)

        # Nested search space mirroring the keyword arguments of MLP.score
        # ("structure" and "fit").
        space = Space({
            "structure": {
                "dense_layers": [{
                    "units": (8, 16, 32),
                    "activation": ("relu", "selu")
                },
                {
                    "units": [8, 16, 32],
                    "activation": ("relu", "selu")
                }],
                "dropout_rate": [0.0, 1.0]
            },
            "fit": {
                "batch_size": [100, 1000]
            }
        })

        gp = GaussianProcess(mlp.score, space)

        # Run the Bayesian optimization loop with a TQDM progress-bar callback.
        n_calls = 3
        results = gp.minimize(
            n_calls=n_calls,
            n_random_starts=1,
            callback=[TQDMGaussianProcess(n_calls=n_calls)],
            random_state=42
        )
        print(gp.best_parameters)
        print(gp.best_optimized_parameters)
        gp.clear_cache()
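
Reading the space above, tuples such as ``(8, 16, 32)`` appear to act as sets
of discrete choices while two-element lists such as ``[0.0, 1.0]`` and
``[100, 1000]`` appear to act as numeric ranges, echoing the dimension types
used by ``gp_minimize``; this is an inference from the example, not a
documented guarantee. Under the further assumption that
``best_optimized_parameters`` returns the tuned values in the same nested
layout accepted by ``score``, they could be plugged back into the model
builder, continuing the example above:

.. code:: python

    # Assumption: best_optimized_parameters mirrors the nested Space layout,
    # i.e. {"structure": {...}, "fit": {...}}; inferred from the example above.
    best = gp.best_optimized_parameters
    final_model = mlp.mlp(**best["structure"])
    final_model.compile(optimizer="nadam", loss="mse")
    final_model.fit(
        *boston_housing.load_data()[0],
        epochs=10,  # illustrative epoch count
        **best["fit"]
    )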

.. |travis| image:: https://travis-ci.org/LucaCappelletti94/gaussian_process.png
   :target: https://travis-ci.org/LucaCappelletti94/gaussian_process
   :alt: Travis CI build

.. |sonar_quality| image:: https://sonarcloud.io/api/project_badges/measure?project=LucaCappelletti94_gaussian_process&metric=alert_status
    :target: https://sonarcloud.io/dashboard/index/LucaCappelletti94_gaussian_process
    :alt: SonarCloud Quality

.. |sonar_maintainability| image:: https://sonarcloud.io/api/project_badges/measure?project=LucaCappelletti94_gaussian_process&metric=sqale_rating
    :target: https://sonarcloud.io/dashboard/index/LucaCappelletti94_gaussian_process
    :alt: SonarCloud Maintainability

.. |sonar_coverage| image:: https://sonarcloud.io/api/project_badges/measure?project=LucaCappelletti94_gaussian_process&metric=coverage
    :target: https://sonarcloud.io/dashboard/index/LucaCappelletti94_gaussian_process
    :alt: SonarCloud Coverage

.. |coveralls| image:: https://coveralls.io/repos/github/LucaCappelletti94/gaussian_process/badge.svg?branch=master
    :target: https://coveralls.io/github/LucaCappelletti94/gaussian_process?branch=master
    :alt: Coveralls Coverage

.. |pip| image:: https://badge.fury.io/py/gaussian-process.svg
    :target: https://badge.fury.io/py/gaussian-process
    :alt: Pypi project

.. |downloads| image:: https://pepy.tech/badge/gaussian-process
    :target: https://pepy.tech/badge/gaussian-process
    :alt: Pypi total project downloads 

.. |codacy| image:: https://api.codacy.com/project/badge/Grade/0a674ed703f44793a27936462ca05080
    :target: https://www.codacy.com/app/LucaCappelletti94/gaussian_process?utm_source=github.com&utm_medium=referral&utm_content=LucaCappelletti94/gaussian_process&utm_campaign=Badge_Grade
    :alt: Codacy Maintainability

.. |code_climate_maintainability| image:: https://api.codeclimate.com/v1/badges/aabe32e918c9ba7cd773/maintainability
    :target: https://codeclimate.com/github/LucaCappelletti94/gaussian_process/maintainability
    :alt: Maintainability

.. |code_climate_coverage| image:: https://api.codeclimate.com/v1/badges/aabe32e918c9ba7cd773/test_coverage
    :target: https://codeclimate.com/github/LucaCappelletti94/gaussian_process/test_coverage
    :alt: Code Climate Coverate
            
