wxbs-benchmark

Name: wxbs-benchmark
Version: 0.0.4
Home page: https://github.com/ducha-aiki/wxbs_benchmark/tree/master/
Summary: Code for benchmarking image matchers on WxBS dataset
Upload time: 2023-10-31 14:28:52
Author: Dmytro Mishkin
Requires Python: >=3.6
License: Apache Software License 2.0
Keywords: wxbs, image matching, benchmark, image correspondences
# wxbs_benchmark

<!-- WARNING: THIS FILE WAS AUTOGENERATED! DO NOT EDIT! -->

## Install

`pip install wxbs_benchmark`

## How to use

The benchmark covers three tasks: fundamental matrix estimation on the WxBS pairs, correspondence prediction for query points, and homography estimation on EVD.

## Task 1: fundamental matrix estimation

Below I show how to benchmark a simple baseline: OpenCV SIFT + MAGSAC++.

``` python
import numpy as np
import cv2
import kornia.feature as KF
import torch
from kornia_moons.feature import *
from tqdm import tqdm
from wxbs_benchmark.dataset import *
from wxbs_benchmark.evaluation import *
import matplotlib.pyplot as plt


def estimate_F_SIFT(img1, img2):
    # Detect SIFT keypoints and descriptors (thresholds relaxed so up to 8000 features survive)
    det = cv2.SIFT_create(8000, contrastThreshold=-10000, edgeThreshold=10000)
    kps1, descs1 = det.detectAndCompute(img1, None)
    kps2, descs2 = det.detectAndCompute(img2, None)
    # Tentative matches via first-to-second nearest neighbor ratio test (0.9)
    snn_ratio, idxs = KF.match_snn(torch.from_numpy(descs1),
                                   torch.from_numpy(descs2), 0.9)
    tentatives = cv2_matches_from_kornia(snn_ratio, idxs)
    src_pts = np.float32([kps1[m.queryIdx].pt for m in tentatives]).reshape(-1, 2)
    dst_pts = np.float32([kps2[m.trainIdx].pt for m in tentatives]).reshape(-1, 2)
    # Robust fundamental matrix estimation with MAGSAC++
    F, _ = cv2.findFundamentalMat(src_pts, dst_pts, cv2.USAC_MAGSAC, 0.25, 0.999, 100000)
    return F


Fs = []
subset = 'test'
dset = WxBSDataset('.WxBS', subset=subset, download=True)
for pair_dict in tqdm(dset):
    Fs.append(estimate_F_SIFT(pair_dict['img1'],
                         pair_dict['img2']))
result_dict, thresholds = evaluate_Fs(Fs, subset)
```

    100%|███████████████████████████████████████████| 32/32 [00:11<00:00,  2.67it/s]

``` python
plt.figure()
plt.plot(thresholds, result_dict['average'], '-x')
plt.ylim([0,1.05])
plt.xlabel('Thresholds')
plt.ylabel('Recall on GT corrs')
plt.grid(True)
plt.legend(['SIFT + MAGSAC++'])
```

    <matplotlib.legend.Legend>

![](index_files/figure-commonmark/cell-4-output-2.png)
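
Besides the full recall curve, a single summary number can be computed by averaging the recall over all thresholds. A minimal sketch, assuming `result_dict['average']` holds one recall value per entry of `thresholds`:

``` python
import numpy as np

# Mean recall over all evaluated thresholds: one number summarizing the curve above
mean_recall = float(np.mean(result_dict['average']))
print(f'SIFT + MAGSAC++: mean recall over thresholds = {mean_recall:.3f}')
```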

We can also check per-image-pair results

``` python
plt.figure(figsize=(10,10))
plt.ylim([0,1.05])
plt.xlabel('Thresholds')
plt.ylabel('Recall on GT corrs')
plt.grid(True)


for img_pair, recall in result_dict.items():
    plt.plot(thresholds, recall, '-x', label=img_pair)

plt.legend()
```

    <matplotlib.legend.Legend>

![](index_files/figure-commonmark/cell-5-output-3.png)

### F-estimation benchmark results

I have evaluated several popular methods in this
[Colab](https://colab.research.google.com/drive/1yrCFyEoAc0HyqYCRVvzJDh5kQT2Dc3XA?usp=sharing).

Here are the resulting graphs. ![image.png](index_files/att_00000.png)

If you are interested in adding your method, please open an issue; the sketch below shows the plug-in pattern.
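
Any method can be plugged into the same loop as above: wrap it in a function that takes the two images and returns a 3x3 fundamental matrix. A minimal sketch of that pattern (`estimate_F_my_method` is a placeholder for your own matcher; the dataset and evaluation calls are the same ones used in Task 1):

``` python
from tqdm import tqdm
from wxbs_benchmark.dataset import *
from wxbs_benchmark.evaluation import *


def estimate_F_my_method(img1, img2):
    # Placeholder: detect features, match them, and robustly fit F here;
    # the function should return a 3x3 fundamental matrix as a numpy array.
    raise NotImplementedError


Fs_my = []
dset = WxBSDataset('.WxBS', subset='test', download=True)
for pair_dict in tqdm(dset):
    Fs_my.append(estimate_F_my_method(pair_dict['img1'], pair_dict['img2']))
result_dict_my, thresholds = evaluate_Fs(Fs_my, 'test')
```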

## Task 2: finding the correspondence in image 2, given query point in image 1

Check this
[Colab](https://colab.research.google.com/drive/1lfjU7N6kOB-bXEzJiUfiNXl24PwT_-FE?usp=sharing)
for an example of running [COTR](https://github.com/ubc-vision/COTR) for
correspondence estimation given the query points.
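
The metric behind this task is the same recall-at-pixel-threshold idea used above, applied to individual points: a predicted correspondence is correct if it falls within a given pixel distance of the ground-truth location. A self-contained sketch of that computation (the `pred_pts` and `gt_pts` arrays are illustrative placeholders, not the benchmark's API):

``` python
import numpy as np

# Illustrative placeholders: predicted locations in image 2 and their ground-truth locations
pred_pts = np.array([[100.0, 50.0], [200.0, 80.0], [30.0, 220.0]])
gt_pts = np.array([[101.0, 52.0], [240.0, 85.0], [30.5, 219.0]])

thresholds_px = np.arange(1, 11)                         # 1..10 pixel thresholds
errors = np.linalg.norm(pred_pts - gt_pts, axis=1)       # per-point error in pixels
recall = [(errors <= t).mean() for t in thresholds_px]   # fraction of points within each threshold
print(list(zip(thresholds_px.tolist(), recall)))
```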

## Task 3: homography estimation on EVD

``` python
import numpy as np
import cv2
import kornia.feature as KF
import kornia as K
import torch
from kornia_moons.feature import *
from tqdm import tqdm
from wxbs_benchmark.dataset import *
from wxbs_benchmark.evaluation import *
import matplotlib.pyplot as plt


def estimate_H_DISK_LG(img1, img2):
    device = torch.device('cpu')
    # LightGlue matcher for DISK descriptors, with early-exit pruning disabled
    config = {"depth_confidence": -1, "width_confidence": -1}
    lg = KF.LightGlueMatcher("disk", config).to(device=device).eval()
    num_features = 2048
    disk = KF.DISK.from_pretrained("depth").to(device)
    # Convert both images to 3-channel float tensors and resize to 600x800 for DISK
    timg1 = K.image_to_tensor(img1, False).float()
    if timg1.shape[1] == 1:
        timg1 = K.color.grayscale_to_rgb(timg1)
    timg1 = K.geometry.resize(timg1, (600, 800), antialias=True).to(device)
    timg2 = K.image_to_tensor(img2, False).float()
    if timg2.shape[1] == 1:
        timg2 = K.color.grayscale_to_rgb(timg2)
    timg2 = K.geometry.resize(timg2, (600, 800), antialias=True).to(device)

    features1 = disk(timg1, num_features, pad_if_not_divisible=True)[0]
    features2 = disk(timg2, num_features, pad_if_not_divisible=True)[0]

    kps1, descs1 = features1.keypoints, features1.descriptors
    kps2, descs2 = features2.keypoints, features2.descriptors

    # LightGlueMatcher expects local affine frames (LAFs); build them from the keypoint centers
    lafs1 = KF.laf_from_center_scale_ori(kps1[None], 96 * torch.ones(1, len(kps1), 1, 1, device=device))
    lafs2 = KF.laf_from_center_scale_ori(kps2[None], 96 * torch.ones(1, len(kps2), 1, 1, device=device))
    dists, idxs = lg(descs1, descs2, lafs1, lafs2, hw1=timg1.shape[2:], hw2=timg2.shape[2:])
    idxs = idxs.detach().cpu().numpy()

    # Rescale matched keypoints from the resized tensors back to original image coordinates
    src_pts = kps1.detach().cpu().numpy()[idxs[:, 0]].reshape(-1, 2)
    src_pts[:, 0] *= img1.shape[1] / float(timg1.shape[3])
    src_pts[:, 1] *= img1.shape[0] / float(timg1.shape[2])

    dst_pts = kps2.detach().cpu().numpy()[idxs[:, 1]].reshape(-1, 2)
    dst_pts[:, 0] *= img2.shape[1] / float(timg2.shape[3])
    dst_pts[:, 1] *= img2.shape[0] / float(timg2.shape[2])
    # Robust homography with MAGSAC++; fall back to identity if estimation fails
    try:
        H, _ = cv2.findHomography(src_pts, dst_pts, cv2.USAC_MAGSAC, 0.5, 0.999, 100000)
    except Exception:
        H = np.eye(3)
    if H is None:
        H = np.eye(3)
    return H


Hs = []

dset = EVDDataset('.EVD',  download=True)
for pair_dict in tqdm(dset):
    with torch.inference_mode():
        Hs.append(estimate_H_DISK_LG(pair_dict['img1'],
                                     pair_dict['img2']))
        
result_dict, thresholds = evaluate_Hs(Hs)
```

    100%|███████████████████████████████████████████| 15/15 [00:25<00:00,  1.73s/it]

    Loaded LightGlue model

Now the same pipeline with plain DISK and mutual SNN matching, for comparison.

``` python
def estimate_H_DISK_smnn(img1, img2):
    # Same pipeline as estimate_H_DISK_LG, but matching with the mutual SNN ratio test instead of LightGlue
    device = torch.device('cpu')
    num_features = 2048
    disk = KF.DISK.from_pretrained("depth").to(device)
    timg1 = K.image_to_tensor(img1, False).float()
    if timg1.shape[1] == 1:
        timg1 = K.color.grayscale_to_rgb(timg1)
    timg1 = K.geometry.resize(timg1, (600, 800), antialias=True).to(device)
    timg2 = K.image_to_tensor(img2, False).float()
    if timg2.shape[1] == 1:
        timg2 = K.color.grayscale_to_rgb(timg2)
    timg2 = K.geometry.resize(timg2, (600, 800), antialias=True).to(device)
    
    features1 = disk(timg1, num_features, pad_if_not_divisible=True)[0]
    features2 = disk(timg2, num_features, pad_if_not_divisible=True)[0]
    
    kps1, descs1 = features1.keypoints, features1.descriptors
    kps2, descs2 = features2.keypoints, features2.descriptors

    dists, idxs = KF.match_smnn(descs1, descs2, 0.98)
    idxs = idxs.detach().cpu().numpy()
    
    src_pts = kps1.detach().cpu().numpy()[idxs[:,0]].reshape(-1,2)
    src_pts[:, 0] *= (img1.shape[1] / float(timg1.shape[3]) )
    src_pts[:, 1] *= (img1.shape[0] / float(timg1.shape[2]) )

    dst_pts = kps2.detach().cpu().numpy()[idxs[:,1]].reshape(-1,2)
    dst_pts[:, 0] *= (img2.shape[1] / float(timg2.shape[3]) )
    dst_pts[:, 1] *= (img2.shape[0] / float(timg2.shape[2]) )
    try:
        H, _ = cv2.findHomography(src_pts, dst_pts, cv2.USAC_MAGSAC, 0.5, 0.999, 100000)
    except Exception:
        H = np.eye(3)
    if H is None:
        H = np.eye(3)
    return H


Hs_plain = []

dset = EVDDataset('.EVD',  download=True)
for pair_dict in tqdm(dset):
    with torch.inference_mode():
        Hs_plain.append(estimate_H_DISK_smnn(pair_dict['img1'],
                                     pair_dict['img2']))
        
result_dict_plain, thresholds = evaluate_Hs(Hs_plain)
```

    100%|███████████████████████████████████████████| 15/15 [00:17<00:00,  1.16s/it]

``` python
plt.figure()
plt.plot(thresholds, result_dict['average'], '-x')
plt.plot(thresholds, result_dict_plain['average'], '-o')

plt.ylim([0,1.05])
plt.xlabel('px thresholds')
plt.ylabel('mAA')
plt.title('Performance on EVD dataset')
plt.grid(True)
plt.legend(['DISK + LightGlue + MAGSAC++', 'DISK + MAGSAC++'])
```

    <matplotlib.legend.Legend>

![](index_files/figure-commonmark/cell-12-output-2.png)

            
