adaptivetesting 1.1.0

Summary: adaptivetesting is a Python package that can be used to simulate and evaluate custom CAT scenarios as well as implement them in real-world testing scenarios from a single codebase.
Uploaded: 2025-07-19 08:36:06
Requires Python: >=3.10
Keywords: computerized-adaptive-testing, item-response-theory, psychology, statistics
# adaptivetesting

<img src="/docs/source/_static/logo.svg" style="width: 100%" alt="adaptivetesting logo">

**An open-source Python package for simplified, customizable Computerized Adaptive Testing (CAT) using Bayesian methods.**


## Key Features

- **Bayesian Methods**: Built-in support for Bayesian ability estimation with customizable priors
- **Flexible Architecture**: Object-oriented design with abstract classes for easy extension
- **Item Response Theory**: Full support for 1PL, 2PL, 3PL, and 4PL models (see the sketch after this list)
- **Multiple Estimators**: 
  - Maximum Likelihood Estimation (MLE)
  - Bayesian Modal Estimation (BM)
  - Expected A Posteriori (EAP)
- **Item Selection Strategies**: Maximum information criterion and Urry's rule
- **Simulation Framework**: Comprehensive tools for CAT simulation and evaluation
- **Real-world Application**: Direct transition from simulation to production testing
- **Stopping Criteria**: Support for standard error and test length criteria
- **Data Management**: Built-in support for CSV and pickle data formats
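For reference, the `a`/`b`/`c`/`d` item parameters used throughout the examples below parameterize the four-parameter logistic (4PL) response function; the 1PL–3PL models follow as special cases (fix `a = 1`, `c = 0`, and/or `d = 1`). The following is a minimal NumPy sketch for illustration only, not the package's internal implementation:

```python
import numpy as np

def probability_4pl(theta: float, a: float, b: float, c: float, d: float) -> float:
    """Probability of a correct response under the 4PL model:
    P(theta) = c + (d - c) / (1 + exp(-a * (theta - b)))."""
    return c + (d - c) / (1 + np.exp(-a * (theta - b)))

# Example: the first item from the pools below
# (a=1.32, b=-0.63, c=0.17, d=0.87) at an average ability level
print(probability_4pl(theta=0.0, a=1.32, b=-0.63, c=0.17, d=0.87))  # ~0.66
```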

## Installation

Install from PyPI using pip:

```bash
pip install adaptivetesting
```

For the latest development version:

```bash
pip install git+https://github.com/condecon/adaptivetesting
```

## Requirements

- Python >= 3.10
- NumPy >= 2.0.0
- Pandas >= 2.2.0
- SciPy >= 1.15.0
- tqdm >= 4.67.1

## Quick Start

### Basic Example: Setting Up an Adaptive Test

```python
from adaptivetesting.models import ItemPool, TestItem
from adaptivetesting.implementations import TestAssembler
from adaptivetesting.math.estimators import BayesModal, NormalPrior
from adaptivetesting.simulation import Simulation, StoppingCriterion, ResultOutputFormat
import pandas as pd

# Create item pool from DataFrame
items_data = pd.DataFrame({
    "a": [1.32, 1.07, 0.84, 1.19, 0.95],  # discrimination
    "b": [-0.63, 0.18, -0.84, 0.41, -0.25],  # difficulty
    "c": [0.17, 0.10, 0.19, 0.15, 0.12],  # guessing
    "d": [0.87, 0.93, 1.0, 0.89, 0.94]   # upper asymptote
})
item_pool = ItemPool.load_from_dataframe(items_data)

# Set up adaptive test
adaptive_test = TestAssembler(
    item_pool=item_pool,
    simulation_id="sim_001",
    participant_id="participant_001",
    ability_estimator=BayesModal,
    estimator_args={"prior": NormalPrior(mean=0, sd=1)},
    true_ability_level=0.5,  # For simulation
    simulation=True
)

# Run simulation
simulation = Simulation(
    test=adaptive_test,
    test_result_output=ResultOutputFormat.CSV
)

simulation.simulate(
    criterion=StoppingCriterion.SE,
    value=0.3  # Stop when standard error <= 0.3
)

# Save results
simulation.save_test_results()
```

### Custom Prior Example

```python
from adaptivetesting.math.estimators import CustomPrior
from scipy.stats import t

# Create a custom prior from scipy's t distribution
# (here with 100 degrees of freedom)
custom_prior = CustomPrior(t, 100)

# Use in estimator
adaptive_test = TestAssembler(
    item_pool=item_pool,
    simulation_id="custom_prior_sim",
    participant_id="participant_002",
    ability_estimator=BayesModal,
    estimator_args={"prior": custom_prior},
    true_ability_level=0.0,
    simulation=True
)
```

### Real-world Testing (Non-simulation) with PsychoPy

```python
from psychopy import visual, event
from psychopy.hardware import keyboard
from adaptivetesting.implementations import TestAssembler
from adaptivetesting.models import AdaptiveTest, ItemPool, TestItem
from adaptivetesting.data import CSVContext
from adaptivetesting.math.estimators import BayesModal, CustomPrior
from adaptivetesting.math.item_selection import maximum_information_criterion
from scipy.stats import t
import pandas as pd


# Create item pool from DataFrame
items_data = pd.DataFrame({
    "a": [1.32, 1.07, 0.84, 1.19, 0.95],  # discrimination
    "b": [-0.63, 0.18, -0.84, 0.41, -0.25],  # difficulty
    "c": [0.17, 0.10, 0.19, 0.15, 0.12],  # guessing
    "d": [0.87, 0.93, 1.0, 0.89, 0.94]   # upper asymptote
})
item_pool = ItemPool.load_from_dataframe(items_data)

# Create adaptive test
adaptive_test: AdaptiveTest = TestAssembler(
        item_pool=item_pool,
        simulation_id="example",
        participant_id="dummy",
        ability_estimator=BayesModal,
        estimator_args={
            "prior": CustomPrior(t, 100),
            "optimization_interval":(-10, 10)
        },
        item_selector=maximum_information_criterion,
        simulation=False,
        debug=False
)

# ====================
# Setup PsychoPy
# ====================

# general setup
win = visual.Window([800, 600],
                    monitor="testMonitor",
                    units="deg",
                    fullscr=False)

# init keyboard
keyboard.Keyboard()


# define function to collect the participant's response
def get_response(item: TestItem) -> int:
    # look up the stimulus word associated with the presented item
    # (this example assumes each pool entry also carries a "word" field,
    # e.g. loaded alongside the item parameters; adapt to your own stimuli)
    item_difficulty: float = item.b
    stimulus: str = [pool_item for pool_item in item_pool.test_items
                     if pool_item["Difficulty"] == item_difficulty][0]["word"]

    # create a text box and display the stimulus
    text_box = visual.TextBox2(win=win,
                               text=stimulus,
                               alignment="center",
                               size=24)
    # draw text
    text_box.draw()
    # update window
    win.flip()

    # wait for pressed keys
    while True:
        keys = event.getKeys()
        # if any keys were pressed
        if keys:
            # the right arrow key codes a correct response
            if keys[0] == "right":
                return 1
            # the left arrow key codes an incorrect response
            if keys[0] == "left":
                return 0


# override adaptive test default function
adaptive_test.get_response = get_response

# start adaptive test
while True:
    adaptive_test.run_test_once()

    # check stopping criterion
    if adaptive_test.standard_error <= 0.4:
        break

    # end test if all items have been shown
    if len(adaptive_test.item_pool.test_items) == 0:
        break

# save test results
data_context = CSVContext(
    adaptive_test.simulation_id,
    adaptive_test.participant_id
)

data_context.save(adaptive_test.test_results)
```

## Package Structure

The package is organized into several key modules; a consolidated import map follows the list:

- **`adaptivetesting.models`**: Core classes including `AdaptiveTest`, `ItemPool`, and `TestItem`
- **`adaptivetesting.implementations`**: Ready-to-use test implementations like `TestAssembler`
- **`adaptivetesting.math`**: Mathematical functions for IRT, ability estimation, and item selection
- **`adaptivetesting.simulation`**: Simulation framework and result management
- **`adaptivetesting.data`**: Data management utilities for CSV and pickle formats
- **`adaptivetesting.services`**: Abstract interfaces and protocols
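
For orientation, here is an import map consolidating the public names used in the examples in this README:

```python
from adaptivetesting.models import AdaptiveTest, ItemPool, TestItem
from adaptivetesting.implementations import TestAssembler
from adaptivetesting.math.estimators import BayesModal, NormalPrior, CustomPrior
from adaptivetesting.math.item_selection import maximum_information_criterion
from adaptivetesting.simulation import Simulation, StoppingCriterion, ResultOutputFormat
from adaptivetesting.data import CSVContext
```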

## Advanced Features

### Multiple Stopping Criteria

```python
simulation.simulate(
    criterion=[StoppingCriterion.SE, StoppingCriterion.LENGTH],
    value=[0.3, 20]  # Stop at SE ≤ 0.3 OR length ≥ 20 items
)
```

### Pretest Phase

```python
adaptive_test = TestAssembler(
    item_pool=item_pool,
    simulation_id="pretest_sim",
    participant_id="participant_003",
    ability_estimator=BayesModal,
    estimator_args={"prior": NormalPrior(0, 1)},
    pretest=True,
    pretest_seed=42,
    simulation=True
)
```

### Custom Item Selection

```python
from adaptivetesting.math.item_selection import maximum_information_criterion

adaptive_test = TestAssembler(
    item_pool=item_pool,
    simulation_id="custom_selection",
    participant_id="participant_004",
    ability_estimator=BayesModal,
    estimator_args={"prior": NormalPrior(0, 1)},
    item_selector=maximum_information_criterion,
    item_selector_args={"additional_param": "value"},
    simulation=True
)
```
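
For intuition, maximum-information selection administers the item whose Fisher information is largest at the current ability estimate. The sketch below illustrates this for the 2PL special case (`c = 0`, `d = 1`), where the information function reduces to I(θ) = a² · P(θ) · (1 − P(θ)); it is a didactic example, not the package's selection routine:

```python
import numpy as np

def information_2pl(theta: float, a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Fisher information of 2PL items at ability theta:
    I(theta) = a^2 * P * (1 - P), with P = 1 / (1 + exp(-a * (theta - b)))."""
    p = 1.0 / (1.0 + np.exp(-a * (theta - b)))
    return a**2 * p * (1.0 - p)

# discrimination and difficulty values from the example pool above
a = np.array([1.32, 1.07, 0.84, 1.19, 0.95])
b = np.array([-0.63, 0.18, -0.84, 0.41, -0.25])

# pick the most informative item at the current estimate theta_hat = 0.0
theta_hat = 0.0
best = int(np.argmax(information_2pl(theta_hat, a, b)))
print(f"select item {best}")
```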

### Custom Optimization Interval

```python
adaptive_test = TestAssembler(
    item_pool=item_pool,
    simulation_id="optimization_interval_sim",
    participant_id="participant_005",
    ability_estimator=BayesModal,
    estimator_args={
        "prior": NormalPrior(0, 1),
        "optimization_interval": (-5, 5)
    },
    simulation=True
)
```

## Documentation

Full documentation is available in the `docs/` directory:

- [API Reference](docs/readme.md)
- [Models Module](docs/adaptivetesting.models.md)
- [Math Module](docs/adaptivetesting.math.md)
- [Implementation Examples](docs/adaptivetesting.implementations.md)
- [Simulation Guide](docs/adaptivetesting.simulation.md)

## Testing

The package includes comprehensive tests. Run them using:

```bash
python -m pytest adaptivetesting/tests/
```

## Contributing

We welcome contributions! Please see our [GitHub repository](https://github.com/condecon/adaptivetesting) for:

- Issue tracking
- Feature requests  
- Pull request guidelines
- Development setup

## Research and Applications

This package is designed for researchers and practitioners in:

- Educational assessment
- Psychological testing
- Cognitive ability measurement
- Adaptive learning systems
- Psychometric research

The package facilitates the transition from research simulation to real-world testing applications without requiring major code modifications.

## License

This project is licensed under the terms specified in the [LICENSE](LICENSE) file.

            
