Name | medvision-segmentation |
Version | 0.2.1 |
download |
home_page | None |
Summary | A medical image segmentation framework based on PyTorch Lightning |
upload_time | 2025-08-14 12:15:46 |
maintainer | None |
docs_url | None |
author | None |
requires_python | >=3.8 |
license | MIT License
Copyright (c) 2025 weizhipeng
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
keywords |
medical imaging
segmentation
pytorch
deep learning
monai
|
VCS |
 |
bugtrack_url |
|
requirements |
No requirements were recorded.
|
Travis-CI |
No Travis.
|
coveralls test coverage |
No coveralls.
|
# MedVision
MedVision 是一个基于 PyTorch Lightning 的医学影像分割框架,提供了训练和推理的简单接口。
## 特点
- 基于 PyTorch Lightning 的高级接口
- 支持常见的医学影像格式(NIfTI、DICOM 等)
- 内置多种分割模型架构(如 UNet)
- 灵活的数据加载和预处理管道
- 模块化设计,易于扩展
- 命令行界面用于训练和推理
## 安装
### 系统要求
- Python 3.8+
- PyTorch 2.0+
- CUDA (可选,用于GPU加速)
### 基本安装
最简单的安装方式:
```bash
pip install -e .
```
### 从源码安装
```bash
git clone https://github.com/yourusername/medvision.git
cd medvision
pip install -e .
```
### 使用requirements文件
```bash
# 基本环境
pip install -r requirements.txt
# 开发环境
pip install -r requirements-dev.txt
```
### 使用conda环境
推荐使用 conda 创建独立的虚拟环境:
```bash
# 创建并激活环境
conda env create -f environment.yml
conda activate medvision
# 安装项目本身
pip install -e .
```
如果您需要更新现有环境:
```bash
conda env update -f environment.yml --prune
```
如果您想删除环境:
```bash
conda env remove -n medvision
```
### 功能模块安装
根据需求选择特定的功能组:
```bash
# 医学影像处理
pip install -e ".[medical]"
# 数据变换
pip install -e ".[transforms]"
# 可视化工具
pip install -e ".[visualization]"
# 评估指标
pip install -e ".[metrics]"
# 开发工具
pip install -e ".[dev]"
# 文档生成
pip install -e ".[docs]"
# 完整安装
pip install -e ".[all]"
```
### 开发环境设置
如果您要参与开发:
```bash
# 安装开发依赖
pip install -e ".[dev]"
# 安装pre-commit钩子
pre-commit install
# 或使用Makefile
make install-dev
```
### 验证安装
```bash
python -c "import medvision; print(medvision.__version__)"
MedVision --help
```
## 快速入门
### 训练模型
```bash
MedVision train configs/train_config.yml
```
### 测试模型
```bash
MedVision test configs/test_config.yml
```
## 配置格式
### 训练配置示例
```yaml
# General settings
seed: 42

# Model configuration
model:
  type: "segmentation"
  network:
    name: "denseunet"
    in_channels: 1
    out_channels: 1
    features: [32, 64, 128, 256]
    dropout: 0.1
  loss:
    type: "dice"
    smooth: 0.00001
  optimizer:
    type: "adam"
    lr: 0.001
    weight_decay: 0.0001
  scheduler:
    type: "plateau"
    patience: 5
    factor: 0.5
  monitor: "val/val_loss"  # `train/train_loss`, `train/train_loss_step`, `val/val_loss`, `val/val_dice`, `val/val_iou`, `train/train_loss_epoch`, `train/train_dice`, `train/train_iou`
  metrics:
    dice:
      type: "dice"
      threshold: 0.5
    iou:
      type: "iou"
      threshold: 0.5

# Data configuration
data:
  type: "medical"
  batch_size: 8
  num_workers: 4
  data_dir: "data/2D"
  train_val_split: [0.8, 0.2]
  dataset_args:
    image_subdir: "images"
    mask_subdir: "masks"
    image_suffix: "*.png"
    mask_suffix: "*.png"

  # 使用简化但完整的MONAI transforms
  train_transforms:
    # 1. 基础预处理
    Resized:
      keys: ["image", "label"]
      spatial_size: [256, 256]
      mode: ["bilinear", "nearest"]
      align_corners: [false, null]

    # 2. 空间变换 - 提升泛化能力
    RandRotated:
      keys: ["image", "label"]
      range_x: 0.2  # ±0.2弧度 ≈ ±11.5度
      range_y: 0.2
      prob: 0.5
      mode: ["bilinear", "nearest"]
      padding_mode: "border"
      align_corners: [false, null]

    RandFlipd:
      keys: ["image", "label"]
      spatial_axis: [0, 1]  # 水平和垂直翻转
      prob: 0.5

    RandAffined:
      keys: ["image", "label"]
      prob: 0.3
      rotate_range: [0.1, 0.1]  # 小角度旋转
      scale_range: [0.1, 0.1]  # 缩放范围 0.9-1.1
      translate_range: [10, 10]  # 平移像素数
      mode: ["bilinear", "nearest"]
      padding_mode: "border"
      align_corners: [false, null]

    RandZoomd:
      keys: ["image", "label"]
      min_zoom: 0.85
      max_zoom: 1.15
      prob: 0.3
      mode: ["bilinear", "nearest"]
      align_corners: [false, null]

    # 3. 强度变换(仅对图像)
    RandAdjustContrastd:
      keys: ["image"]
      prob: 0.3
      gamma: [0.8, 1.2]  # 对比度调整范围

    RandScaleIntensityd:
      keys: ["image"]
      factors: 0.2  # 强度缩放因子
      prob: 0.3

    RandShiftIntensityd:
      keys: ["image"]
      offsets: 0.1  # 强度偏移
      prob: 0.3

    RandGaussianNoised:
      keys: ["image"]
      prob: 0.2
      mean: 0.0
      std: 0.1

    RandGaussianSmoothd:
      keys: ["image"]
      prob: 0.1
      sigma_x: [0.5, 1.0]
      sigma_y: [0.5, 1.0]

    RandBiasFieldd:
      keys: ["image"]
      prob: 0.15
      degree: 3
      coeff_range: [0.0, 0.1]

    # 4. 归一化
    NormalizeIntensityd:
      keys: ["image"]
      nonzero: true
      channel_wise: false

  val_transforms:
    # 验证时只做基础预处理
    Resized:
      keys: ["image", "label"]
      spatial_size: [256, 256]
      mode: ["bilinear", "nearest"]
      align_corners: [false, null]

    NormalizeIntensityd:
      keys: ["image"]
      nonzero: true
      channel_wise: false

  test_transforms:
    # 测试时只做基础预处理
    Resized:
      keys: ["image", "label"]
      spatial_size: [256, 256]
      mode: ["bilinear", "nearest"]
      align_corners: [false, null]

    NormalizeIntensityd:
      keys: ["image"]
      nonzero: true
      channel_wise: false

# Training configuration
training:
  max_epochs: 2
  devices: 1
  accelerator: "auto"
  precision: 16-mixed
  output_dir: "outputs"
  experiment_name: "brain_tumor_segmentation"
  monitor: "val/val_loss"  # `train/train_loss`, `train/train_loss_step`, `val/val_loss`, `val/val_dice`, `val/val_iou`, `train/train_loss_epoch`, `train/train_dice`, `train/train_iou`
  monitor_mode: "min"
  early_stopping: true
  patience: 10
  save_top_k: 3
  log_every_n_steps: 10
  deterministic: false
```
### 测试配置示例
```yaml
# General settings
seed: 42

# Model configuration
model:
  type: "segmentation"
  network:
    name: "unet"
    in_channels: 1
    out_channels: 1
    features: [32, 64, 128, 256]
    dropout: 0.0
  metrics:
    dice:
      type: "dice"
      threshold: 0.5
    iou:
      type: "iou"
      threshold: 0.5
    accuracy:
      type: "accuracy"
      threshold: 0.5
  loss:
    type: "dice"
    smooth: 0.00001

# Checkpoint path
checkpoint_path: "outputs/checkpoints/last.ckpt"

# Data configuration
data:
  type: "medical"
  batch_size: 8
  num_workers: 4
  data_dir: "data/2D"

  # 数据集参数 - 与训练配置保持一致
  dataset_args:
    image_subdir: "images"
    mask_subdir: "masks"
    image_suffix: "*.png"
    mask_suffix: "*.png"

  # 测试变换 - 与训练时的验证变换完全一致
  test_transforms:
    Resized:
      keys: ["image", "label"]
      spatial_size: [256, 256]
      mode: ["bilinear", "nearest"]
      align_corners: [false, null]

    NormalizeIntensityd:
      keys: ["image"]
      nonzero: true
      channel_wise: false

# Testing configuration
testing:
  devices: 1
  accelerator: "auto"
  precision: 16-mixed
  output_dir: "outputs/predictions"
```
### 推理配置示例
```yaml
# Inference configuration for MedVision
# This config is for pure inference without labels

# General settings
seed: 42

# Model configuration (should match training)
model:
  type: "segmentation"
  network:
    name: "unet"
    in_channels: 1
    out_channels: 1
    features: [32, 64, 128, 256]
    dropout: 0.0  # 推理时关闭dropout
  # 推理时仍需要loss配置(但不会使用)
  loss:
    type: "dice"
    smooth: 0.00001

# Checkpoint path - 必须指定训练好的模型
checkpoint_path: "outputs/checkpoints/last.ckpt"

# Inference configuration
inference:
  # 输入图像目录 (只包含图像,不需要标签)
  image_dir: "data/2D/images"

  # 输出配置
  output_dir: "outputs/predictions"
  save_format: "png"  # png, npy

  # 数据加载配置
  batch_size: 4
  num_workers: 4
  pin_memory: true
  image_suffix: "*.png"

  # 硬件配置
  devices: 1
  accelerator: "auto"
  precision: 16-mixed

  # 推理变换 (只处理图像,不需要label)
  transforms:
    Resized:
      keys: ["image"]  # 注意:只有image,没有label
      spatial_size: [256, 256]
      mode: "bilinear"
      align_corners: false

    NormalizeIntensity:
      keys: ["image"]
      nonzero: true
```
## 自定义扩展
### 添加新的模型架构
1. 在 `medvision/models/` 目录下创建新的模型文件
2. 更新 `get_model` 函数以识别新的模型类型
### 添加新的数据集
1. 在 `medvision/datasets/` 目录下创建新的数据集类
2. 更新 `get_datamodule` 函数以识别新的数据集类型
## 许可证
MIT
## 贡献指南
欢迎贡献!请查看 [CONTRIBUTING.md](CONTRIBUTING.md) 获取详细信息。
Raw data
{
"_id": null,
"home_page": null,
"name": "medvision-segmentation",
"maintainer": null,
"docs_url": null,
"requires_python": ">=3.8",
"maintainer_email": "weizhipeng <weizhipeng@shu.edu.cn>",
"keywords": "medical imaging, segmentation, pytorch, deep learning, MONAI",
"author": null,
"author_email": "weizhipeng <weizhipeng@shu.edu.cn>",
"download_url": "https://files.pythonhosted.org/packages/db/25/ae1cad9476469e507e7380bac64948371433a796820a11b2efbfb85efcdb/medvision_segmentation-0.2.1.tar.gz",
"platform": null,
"description": "# MedVision\n\nMedVision \u662f\u4e00\u4e2a\u57fa\u4e8e PyTorch Lightning \u7684\u533b\u5b66\u5f71\u50cf\u5206\u5272\u6846\u67b6\uff0c\u63d0\u4f9b\u4e86\u8bad\u7ec3\u548c\u63a8\u7406\u7684\u7b80\u5355\u63a5\u53e3\u3002\n\n## \u7279\u70b9\n\n- \u57fa\u4e8e PyTorch Lightning \u7684\u9ad8\u7ea7\u63a5\u53e3\n- \u652f\u6301\u5e38\u89c1\u7684\u533b\u5b66\u5f71\u50cf\u683c\u5f0f\uff08NIfTI\u3001DICOM \u7b49\uff09\n- \u5185\u7f6e\u591a\u79cd\u5206\u5272\u6a21\u578b\u67b6\u6784\uff08\u5982 UNet\uff09\n- \u7075\u6d3b\u7684\u6570\u636e\u52a0\u8f7d\u548c\u9884\u5904\u7406\u7ba1\u9053\n- \u6a21\u5757\u5316\u8bbe\u8ba1\uff0c\u6613\u4e8e\u6269\u5c55\n- \u547d\u4ee4\u884c\u754c\u9762\u7528\u4e8e\u8bad\u7ec3\u548c\u63a8\u7406\n\n## \u5b89\u88c5\n\n### \u7cfb\u7edf\u8981\u6c42\n\n- Python 3.8+\n- PyTorch 2.0+\n- CUDA (\u53ef\u9009\uff0c\u7528\u4e8eGPU\u52a0\u901f)\n\n### \u57fa\u672c\u5b89\u88c5\n\n\u6700\u7b80\u5355\u7684\u5b89\u88c5\u65b9\u5f0f\uff1a\n\n```bash\npip install -e .\n```\n\n### \u4ece\u6e90\u7801\u5b89\u88c5\n\n```bash\ngit clone https://github.com/yourusername/medvision.git\ncd medvision\npip install -e .\n```\n\n### \u4f7f\u7528requirements\u6587\u4ef6\n\n```bash\n# \u57fa\u672c\u73af\u5883\npip install -r requirements.txt\n\n# \u5f00\u53d1\u73af\u5883\npip install -r requirements-dev.txt\n```\n\n### \u4f7f\u7528conda\u73af\u5883\n\n\u63a8\u8350\u4f7f\u7528 conda \u521b\u5efa\u72ec\u7acb\u7684\u865a\u62df\u73af\u5883\uff1a\n\n```bash\n# \u521b\u5efa\u5e76\u6fc0\u6d3b\u73af\u5883\nconda env create -f environment.yml\nconda activate medvision\n\n# \u5b89\u88c5\u9879\u76ee\u672c\u8eab\npip install -e .\n```\n\n\u5982\u679c\u60a8\u9700\u8981\u66f4\u65b0\u73b0\u6709\u73af\u5883\uff1a\n\n```bash\nconda env update -f environment.yml --prune\n```\n\n\u5982\u679c\u60a8\u60f3\u5220\u9664\u73af\u5883\uff1a\n\n```bash\nconda env remove -n medvision\n```\n\n### 
\u529f\u80fd\u6a21\u5757\u5b89\u88c5\n\n\u6839\u636e\u9700\u6c42\u9009\u62e9\u7279\u5b9a\u7684\u529f\u80fd\u7ec4\uff1a\n\n```bash\n# \u533b\u5b66\u5f71\u50cf\u5904\u7406\npip install -e \".[medical]\"\n\n# \u6570\u636e\u53d8\u6362\npip install -e \".[transforms]\"\n\n# \u53ef\u89c6\u5316\u5de5\u5177\npip install -e \".[visualization]\"\n\n# \u8bc4\u4f30\u6307\u6807\npip install -e \".[metrics]\"\n\n# \u5f00\u53d1\u5de5\u5177\npip install -e \".[dev]\"\n\n# \u6587\u6863\u751f\u6210\npip install -e \".[docs]\"\n\n# \u5b8c\u6574\u5b89\u88c5\npip install -e \".[all]\"\n```\n\n### \u5f00\u53d1\u73af\u5883\u8bbe\u7f6e\n\n\u5982\u679c\u60a8\u8981\u53c2\u4e0e\u5f00\u53d1\uff1a\n\n```bash\n# \u5b89\u88c5\u5f00\u53d1\u4f9d\u8d56\npip install -e \".[dev]\"\n\n# \u5b89\u88c5pre-commit\u94a9\u5b50\npre-commit install\n\n# \u6216\u4f7f\u7528Makefile\nmake install-dev\n```\n\n### \u9a8c\u8bc1\u5b89\u88c5\n\n```bash\npython -c \"import medvision; print(medvision.__version__)\"\nMedVision --help\n```\n\n## \u5feb\u901f\u5165\u95e8\n\n### \u8bad\u7ec3\u6a21\u578b\n\n```bash\nMedVision train configs/train_config.yml\n```\n\n### \u6d4b\u8bd5\u6a21\u578b\n\n```bash\nMedVision test configs/test_config.yml\n```\n\n## \u914d\u7f6e\u683c\u5f0f\n\n### \u8bad\u7ec3\u914d\u7f6e\u793a\u4f8b\n\n```yaml\n# General settings\nseed: 42\n\n# Model configuration\nmodel:\n type: \"segmentation\"\n network:\n name: \"denseunet\"\n\n in_channels: 1\n out_channels: 1\n features: [32, 64, 128, 256]\n dropout: 0.1\n loss:\n type: \"dice\"\n smooth: 0.00001\n optimizer:\n type: \"adam\"\n lr: 0.001\n weight_decay: 0.0001\n scheduler:\n type: \"plateau\"\n patience: 5\n factor: 0.5\n monitor: \"val/val_loss\" #`train/train_loss`, `train/train_loss_step`, `val/val_loss`, `val/val_dice`, `val/val_iou`, `train/train_loss_epoch`, `train/train_dice`, `train/train_iou`\n metrics:\n dice:\n type: \"dice\"\n threshold: 0.5\n iou:\n type: \"iou\"\n threshold: 0.5\n\n# Data configuration\ndata:\n type: \"medical\"\n 
batch_size: 8\n num_workers: 4\n data_dir: \"data/2D\"\n train_val_split: [0.8, 0.2]\n dataset_args: \n image_subdir: \"images\" \n mask_subdir: \"masks\" \n image_suffix: \"*.png\" \n mask_suffix: \"*.png\" \n\n# \u4f7f\u7528\u7b80\u5316\u4f46\u5b8c\u6574\u7684MONAI transforms \n train_transforms:\n # 1. \u57fa\u7840\u9884\u5904\u7406\n Resized:\n keys: [\"image\", \"label\"]\n spatial_size: [256, 256]\n mode: [\"bilinear\", \"nearest\"]\n align_corners: [false, null]\n \n # 2. \u7a7a\u95f4\u53d8\u6362 - \u63d0\u5347\u6cdb\u5316\u80fd\u529b\n RandRotated:\n keys: [\"image\", \"label\"]\n range_x: 0.2 # \u00b10.2\u5f27\u5ea6 \u2248 \u00b111.5\u5ea6\n range_y: 0.2\n prob: 0.5\n mode: [\"bilinear\", \"nearest\"]\n padding_mode: \"border\"\n align_corners: [false, null]\n \n RandFlipd:\n keys: [\"image\", \"label\"]\n spatial_axis: [0, 1] # \u6c34\u5e73\u548c\u5782\u76f4\u7ffb\u8f6c\n prob: 0.5\n \n RandAffined:\n keys: [\"image\", \"label\"]\n prob: 0.3\n rotate_range: [0.1, 0.1] # \u5c0f\u89d2\u5ea6\u65cb\u8f6c\n scale_range: [0.1, 0.1] # \u7f29\u653e\u8303\u56f4 0.9-1.1\n translate_range: [10, 10] # \u5e73\u79fb\u50cf\u7d20\u6570\n mode: [\"bilinear\", \"nearest\"]\n padding_mode: \"border\"\n align_corners: [false, null]\n \n RandZoomd:\n keys: [\"image\", \"label\"]\n min_zoom: 0.85\n max_zoom: 1.15\n prob: 0.3\n mode: [\"bilinear\", \"nearest\"]\n align_corners: [false, null]\n \n # 3. 
\u5f3a\u5ea6\u53d8\u6362\uff08\u4ec5\u5bf9\u56fe\u50cf\uff09\n RandAdjustContrastd:\n keys: [\"image\"]\n prob: 0.3\n gamma: [0.8, 1.2] # \u5bf9\u6bd4\u5ea6\u8c03\u6574\u8303\u56f4\n \n RandScaleIntensityd:\n keys: [\"image\"]\n factors: 0.2 # \u5f3a\u5ea6\u7f29\u653e\u56e0\u5b50\n prob: 0.3\n \n RandShiftIntensityd:\n keys: [\"image\"]\n offsets: 0.1 # \u5f3a\u5ea6\u504f\u79fb\n prob: 0.3\n \n RandGaussianNoised:\n keys: [\"image\"]\n prob: 0.2\n mean: 0.0\n std: 0.1\n \n RandGaussianSmoothd:\n keys: [\"image\"]\n prob: 0.1\n sigma_x: [0.5, 1.0]\n sigma_y: [0.5, 1.0]\n \n RandBiasFieldd:\n keys: [\"image\"]\n prob: 0.15\n degree: 3\n coeff_range: [0.0, 0.1]\n \n # 4. \u5f52\u4e00\u5316\n NormalizeIntensityd:\n keys: [\"image\"]\n nonzero: true\n channel_wise: false\n val_transforms:\n # \u9a8c\u8bc1\u65f6\u53ea\u505a\u57fa\u7840\u9884\u5904\u7406\n Resized:\n keys: [\"image\", \"label\"]\n spatial_size: [256, 256]\n mode: [\"bilinear\", \"nearest\"]\n align_corners: [false, null]\n \n NormalizeIntensityd:\n keys: [\"image\"]\n nonzero: true\n channel_wise: false\n \n test_transforms:\n # \u6d4b\u8bd5\u65f6\u53ea\u505a\u57fa\u7840\u9884\u5904\u7406\n Resized:\n keys: [\"image\", \"label\"]\n spatial_size: [256, 256]\n mode: [\"bilinear\", \"nearest\"]\n align_corners: [false, null]\n \n NormalizeIntensityd:\n keys: [\"image\"]\n nonzero: true\n channel_wise: false\n \n# Training configuration\ntraining:\n max_epochs: 2\n devices: 1\n accelerator: \"auto\"\n precision: 16-mixed \n output_dir: \"outputs\"\n experiment_name: \"brain_tumor_segmentation\"\n monitor: \"val/val_loss\" #`train/train_loss`, `train/train_loss_step`, `val/val_loss`, `val/val_dice`, `val/val_iou`, `train/train_loss_epoch`, `train/train_dice`, `train/train_iou`\n monitor_mode: \"min\"\n early_stopping: true\n patience: 10\n save_top_k: 3\n log_every_n_steps: 10\n deterministic: false\n\n```\n\n### \u6d4b\u8bd5\u914d\u7f6e\u793a\u4f8b\n\n```yaml\n# General settings\nseed: 42\n\n# Model 
configuration\nmodel:\n type: \"segmentation\"\n network:\n name: \"unet\"\n \n in_channels: 1\n out_channels: 1\n features: [32, 64, 128, 256]\n dropout: 0.0 \n metrics:\n dice:\n type: \"dice\"\n threshold: 0.5\n iou:\n type: \"iou\" \n threshold: 0.5\n accuracy:\n type: \"accuracy\"\n threshold: 0.5\n loss:\n type: \"dice\"\n smooth: 0.00001\n# Checkpoint path\ncheckpoint_path: \"outputs/checkpoints/last.ckpt\"\n\n# Data configuration\ndata:\n type: \"medical\"\n batch_size: 8 \n num_workers: 4\n data_dir: \"data/2D\"\n \n # \u6570\u636e\u96c6\u53c2\u6570 - \u4e0e\u8bad\u7ec3\u914d\u7f6e\u4fdd\u6301\u4e00\u81f4\n dataset_args: \n image_subdir: \"images\" \n mask_subdir: \"masks\" \n image_suffix: \"*.png\" \n mask_suffix: \"*.png\"\n \n # \u6d4b\u8bd5\u53d8\u6362 - \u4e0e\u8bad\u7ec3\u65f6\u7684\u9a8c\u8bc1\u53d8\u6362\u5b8c\u5168\u4e00\u81f4\n test_transforms:\n Resized:\n keys: [\"image\", \"label\"]\n spatial_size: [256, 256]\n mode: [\"bilinear\", \"nearest\"]\n align_corners: [false, null]\n \n NormalizeIntensityd:\n keys: [\"image\"]\n nonzero: true\n channel_wise: false\n \n# Testing configuration\ntesting:\n devices: 1\n accelerator: \"auto\"\n precision: 16-mixed \n output_dir: \"outputs/predictions\"\n\n```\n\n### \u63a8\u7406\u914d\u7f6e\u793a\u4f8b\n```yaml\n\n# Inference configuration for MedVision\n# This config is for pure inference without labels\n\n# General settings\nseed: 42\n\n# Model configuration (should match training)\nmodel:\n type: \"segmentation\"\n network:\n name: \"unet\"\n \n in_channels: 1\n out_channels: 1\n features: [32, 64, 128, 256]\n dropout: 0.0 # \u63a8\u7406\u65f6\u5173\u95eddropout\n # \u63a8\u7406\u65f6\u4ecd\u9700\u8981loss\u914d\u7f6e(\u4f46\u4e0d\u4f1a\u4f7f\u7528)\n loss:\n type: \"dice\"\n smooth: 0.00001\n\n# Checkpoint path - \u5fc5\u987b\u6307\u5b9a\u8bad\u7ec3\u597d\u7684\u6a21\u578b\ncheckpoint_path: \"outputs/checkpoints/last.ckpt\"\n\n# Inference configuration\ninference:\n # 
\u8f93\u5165\u56fe\u50cf\u76ee\u5f55 (\u53ea\u5305\u542b\u56fe\u50cf\uff0c\u4e0d\u9700\u8981\u6807\u7b7e)\n image_dir: \"data/2D/images\"\n \n # \u8f93\u51fa\u914d\u7f6e\n output_dir: \"outputs/predictions\"\n save_format: \"png\" # png, npy\n \n # \u6570\u636e\u52a0\u8f7d\u914d\u7f6e\n batch_size: 4\n num_workers: 4\n pin_memory: true\n image_suffix: \"*.png\"\n \n # \u786c\u4ef6\u914d\u7f6e\n devices: 1\n accelerator: \"auto\"\n precision: 16-mixed \n \n # \u63a8\u7406\u53d8\u6362 (\u53ea\u5904\u7406\u56fe\u50cf\uff0c\u4e0d\u9700\u8981label)\n transforms:\n Resized:\n keys: [\"image\"] # \u6ce8\u610f\uff1a\u53ea\u6709image\uff0c\u6ca1\u6709label\n spatial_size: [256, 256]\n mode: \"bilinear\"\n align_corners: false\n \n NormalizeIntensity: \n keys: [\"image\"]\n nonzero: true\n\n\n```\n## \u81ea\u5b9a\u4e49\u6269\u5c55\n\n### \u6dfb\u52a0\u65b0\u7684\u6a21\u578b\u67b6\u6784\n\n1. \u5728 `medvision/models/` \u76ee\u5f55\u4e0b\u521b\u5efa\u65b0\u7684\u6a21\u578b\u6587\u4ef6\n2. \u66f4\u65b0 `get_model` \u51fd\u6570\u4ee5\u8bc6\u522b\u65b0\u7684\u6a21\u578b\u7c7b\u578b\n\n### \u6dfb\u52a0\u65b0\u7684\u6570\u636e\u96c6\n\n1. \u5728 `medvision/datasets/` \u76ee\u5f55\u4e0b\u521b\u5efa\u65b0\u7684\u6570\u636e\u96c6\u7c7b\n2. \u66f4\u65b0 `get_datamodule` \u51fd\u6570\u4ee5\u8bc6\u522b\u65b0\u7684\u6570\u636e\u96c6\u7c7b\u578b\n\n## \u8bb8\u53ef\u8bc1\n\nMIT\n\n## \u8d21\u732e\u6307\u5357\n\n\u6b22\u8fce\u8d21\u732e\uff01\u8bf7\u67e5\u770b [CONTRIBUTING.md](CONTRIBUTING.md) \u83b7\u53d6\u8be6\u7ec6\u4fe1\u606f\u3002\n",
"bugtrack_url": null,
"license": "MIT License\n \n Copyright (c) 2025 weizhipeng\n \n Permission is hereby granted, free of charge, to any person obtaining a copy\n of this software and associated documentation files (the \"Software\"), to deal\n in the Software without restriction, including without limitation the rights\n to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n copies of the Software, and to permit persons to whom the Software is\n furnished to do so, subject to the following conditions:\n \n The above copyright notice and this permission notice shall be included in all\n copies or substantial portions of the Software.\n \n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n SOFTWARE.\n ",
"summary": "A medical image segmentation framework based on PyTorch Lightning",
"version": "0.2.1",
"project_urls": {
"Bug Tracker": "https://github.com/yourusername/medvision/issues",
"Documentation": "https://medvision.readthedocs.io",
"Homepage": "https://github.com/yourusername/medvision",
"Repository": "https://github.com/yourusername/medvision"
},
"split_keywords": [
"medical imaging",
" segmentation",
" pytorch",
" deep learning",
" monai"
],
"urls": [
{
"comment_text": null,
"digests": {
"blake2b_256": "53eea77564727a515994bbb0b2fb9eb9b4460606a18849b646448186d95ee315",
"md5": "8705724b838ce18b209042537f5104cf",
"sha256": "da22b5f51b38e40025635b98c7ea50c5c770ad27e4c5013daed3da30c9563c60"
},
"downloads": -1,
"filename": "medvision_segmentation-0.2.1-py3-none-any.whl",
"has_sig": false,
"md5_digest": "8705724b838ce18b209042537f5104cf",
"packagetype": "bdist_wheel",
"python_version": "py3",
"requires_python": ">=3.8",
"size": 57622,
"upload_time": "2025-08-14T12:15:45",
"upload_time_iso_8601": "2025-08-14T12:15:45.123909Z",
"url": "https://files.pythonhosted.org/packages/53/ee/a77564727a515994bbb0b2fb9eb9b4460606a18849b646448186d95ee315/medvision_segmentation-0.2.1-py3-none-any.whl",
"yanked": false,
"yanked_reason": null
},
{
"comment_text": null,
"digests": {
"blake2b_256": "db25ae1cad9476469e507e7380bac64948371433a796820a11b2efbfb85efcdb",
"md5": "f6d9ad97ac39387d4d726417d3192341",
"sha256": "46c85e732ec20fd8a92422606953835067291c5de86dbf648be40143365e01d6"
},
"downloads": -1,
"filename": "medvision_segmentation-0.2.1.tar.gz",
"has_sig": false,
"md5_digest": "f6d9ad97ac39387d4d726417d3192341",
"packagetype": "sdist",
"python_version": "source",
"requires_python": ">=3.8",
"size": 54587,
"upload_time": "2025-08-14T12:15:46",
"upload_time_iso_8601": "2025-08-14T12:15:46.638542Z",
"url": "https://files.pythonhosted.org/packages/db/25/ae1cad9476469e507e7380bac64948371433a796820a11b2efbfb85efcdb/medvision_segmentation-0.2.1.tar.gz",
"yanked": false,
"yanked_reason": null
}
],
"upload_time": "2025-08-14 12:15:46",
"github": true,
"gitlab": false,
"bitbucket": false,
"codeberg": false,
"github_user": "yourusername",
"github_project": "medvision",
"github_not_found": true,
"lcname": "medvision-segmentation"
}