Adding basic CI and fixing minor problems (#17)

* Adding integration tests

Create python-tests.yml
Adding ceres and eigen dependencies

Adding coverage

Adding parameterized

Fixing GPU torch problems

Avoiding explicit transfer of the model to CUDA

Adding tests for eval utils and standardizing the batched/non-batched behaviour (only tested with homographies)

Testing failures in pytest

Fixing dummy test

Removing MishaKav/pytest-coverage-comment

Fixing problems in MegaDepth evaluation

Fixing division by 0 and converting function to torch

Fixing lint errors

Fixing isort errors

* Addressing PR comments

* Addressing second round of PR comments
Branch: main
Author: Iago Suárez, 2023-10-18 00:09:58 -08:00 (committed by GitHub)
Parent: 692c72f94c
Commit: aa7727675e
17 changed files with 332 additions and 58 deletions

.github/workflows/python-tests.yml (new file, +30 lines)

@@ -0,0 +1,30 @@
name: Python Tests
on:
push:
branches:
- main
pull_request:
types: [ assigned, opened, synchronize, reopened ]
jobs:
build:
name: Run Python Tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.10'
cache: 'pip'
- name: Install dependencies
run: |
sudo apt-get remove libunwind-14-dev || true
sudo apt-get install -y libceres-dev libeigen3-dev
python -m pip install --upgrade pip
python -m pip install pytest pytest-cov
python -m pip install torch torchvision --index-url https://download.pytorch.org/whl/cpu
python -m pip install -e .[dev]
python -m pip install -e .[extra]
- name: Test with pytest
run: |
set -o pipefail
pytest --junitxml=pytest.xml --cov=gluefactory tests/
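
For local debugging it can help to mirror the CI test step without the GitHub runner. A minimal sketch, assuming pytest, pytest-cov, and the package's dev extras are already installed in the current environment:

import pytest

# Roughly equivalent to the CI invocation `pytest --cov=gluefactory tests/`,
# run programmatically; pytest.main returns the process exit code.
raise SystemExit(pytest.main(["--cov=gluefactory", "tests/"]))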

assets/boat1.png (new binary file, 519 KiB; not shown)

assets/boat2.png (new binary file, 580 KiB; not shown)

gluefactory/eval/hpatches.py

@@ -5,6 +5,7 @@ from pprint import pprint
 import matplotlib.pyplot as plt
 import numpy as np
+import torch
 from omegaconf import OmegaConf
 from tqdm import tqdm
@@ -12,6 +13,7 @@ from ..datasets import get_dataset
 from ..models.cache_loader import CacheLoader
 from ..settings import EVAL_PATH
 from ..utils.export_predictions import export_predictions
+from ..utils.tensor import map_tensor
 from ..utils.tools import AUCMetric
 from ..visualization.viz2d import plot_cumulative
 from .eval_pipeline import EvalPipeline
@@ -105,9 +107,11 @@ class HPatchesPipeline(EvalPipeline):
         cache_loader = CacheLoader({"path": str(pred_file), "collate": None}).eval()
         for i, data in enumerate(tqdm(loader)):
             pred = cache_loader(data)
+            # Remove batch dimension
+            data = map_tensor(data, lambda t: torch.squeeze(t, dim=0))
             # add custom evaluations here
             if "keypoints0" in pred:
-                results_i = eval_matches_homography(data, pred, {})
+                results_i = eval_matches_homography(data, pred)
                 results_i = {**results_i, **eval_homography_dlt(data, pred)}
             else:
                 results_i = {}
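
The squeeze above relies on map_tensor applying a function to every tensor in a (possibly nested) dict. A minimal sketch of the intended effect, assuming the package is importable:

import torch

from gluefactory.utils.tensor import map_tensor

# A loader with batch size 1 yields tensors with a leading batch dimension.
data = {
    "H_0to1": torch.eye(3)[None],  # (1, 3, 3)
    "view0": {"image_size": torch.tensor([[640, 480]])},  # (1, 2)
}

# torch.squeeze(t, dim=0) removes the batch dimension only when it has
# size 1, so already-unbatched tensors pass through unchanged.
data = map_tensor(data, lambda t: torch.squeeze(t, dim=0))
assert data["H_0to1"].shape == (3, 3)
assert data["view0"]["image_size"].shape == (2,)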

gluefactory/eval/utils.py

@@ -1,11 +1,12 @@
-import kornia
 import numpy as np
 import torch
+from kornia.geometry.homography import find_homography_dlt

 from ..geometry.epipolar import generalized_epi_dist, relative_pose_error
 from ..geometry.gt_generation import IGNORE_FEATURE
 from ..geometry.homography import homography_corner_error, sym_homography_error
 from ..robust_estimators import load_estimator
+from ..utils.tensor import index_batch
 from ..utils.tools import AUCMetric
@@ -26,6 +27,16 @@ def get_matches_scores(kpts0, kpts1, matches0, mscores0):
     return pts0, pts1, scores


+def eval_per_batch_item(data: dict, pred: dict, eval_f, *args, **kwargs):
+    # Batched data
+    results = [
+        eval_f(data_i, pred_i, *args, **kwargs)
+        for data_i, pred_i in zip(index_batch(data), index_batch(pred))
+    ]
+    # Return a dictionary of lists with the evaluation of each item
+    return {k: [r[k] for r in results] for k in results[0].keys()}
+
+
 def eval_matches_epipolar(data: dict, pred: dict) -> dict:
     check_keys_recursive(data, ["view0", "view1", "T_0to1"])
     check_keys_recursive(
@@ -58,23 +69,25 @@ def eval_matches_epipolar(data: dict, pred: dict) -> dict:
     return results


-def eval_matches_homography(data: dict, pred: dict, conf) -> dict:
+def eval_matches_homography(data: dict, pred: dict) -> dict:
     check_keys_recursive(data, ["H_0to1"])
     check_keys_recursive(
         pred, ["keypoints0", "keypoints1", "matches0", "matching_scores0"]
     )
     H_gt = data["H_0to1"]
+    if H_gt.ndim > 2:
+        return eval_per_batch_item(data, pred, eval_matches_homography)
     kp0, kp1 = pred["keypoints0"], pred["keypoints1"]
     m0, scores0 = pred["matches0"], pred["matching_scores0"]
     pts0, pts1, scores = get_matches_scores(kp0, kp1, m0, scores0)
-    err = sym_homography_error(pts0, pts1, H_gt[0])
+    err = sym_homography_error(pts0, pts1, H_gt)
     results = {}
     results["prec@1px"] = (err < 1).float().mean().nan_to_num().item()
     results["prec@3px"] = (err < 3).float().mean().nan_to_num().item()
     results["num_matches"] = pts0.shape[0]
     results["num_keypoints"] = (kp0.shape[0] + kp1.shape[0]) / 2.0
     return results
@@ -84,7 +97,7 @@ def eval_relative_pose_robust(data, pred, conf):
         pred, ["keypoints0", "keypoints1", "matches0", "matching_scores0"]
     )
-    T_gt = data["T_0to1"][0]
+    T_gt = data["T_0to1"]
     kp0, kp1 = pred["keypoints0"], pred["keypoints1"]
     m0, scores0 = pred["matches0"], pred["matching_scores0"]
     pts0, pts1, scores = get_matches_scores(kp0, kp1, m0, scores0)
@@ -107,9 +120,8 @@ def eval_relative_pose_robust(data, pred, conf):
     else:
         # R, t, inl = ret
         M = est["M_0to1"]
-        R, t = M.numpy()
         inl = est["inliers"].numpy()
-        r_error, t_error = relative_pose_error(T_gt, R, t)
+        t_error, r_error = relative_pose_error(T_gt, M.R, M.t)
         results["rel_pose_error"] = max(r_error, t_error)
         results["ransac_inl"] = np.sum(inl)
         results["ransac_inl%"] = np.mean(inl)
@@ -119,6 +131,9 @@ def eval_relative_pose_robust(data, pred, conf):

 def eval_homography_robust(data, pred, conf):
     H_gt = data["H_0to1"]
+    if H_gt.ndim > 2:
+        return eval_per_batch_item(data, pred, eval_relative_pose_robust, conf)
+
     estimator = load_estimator("homography", conf["estimator"])(conf)
     data_ = {}
@@ -158,24 +173,26 @@ def eval_homography_robust(data, pred, conf):
     return results


-def eval_homography_dlt(data, pred, *args):
+def eval_homography_dlt(data, pred):
     H_gt = data["H_0to1"]
     H_inf = torch.ones_like(H_gt) * float("inf")

     kp0, kp1 = pred["keypoints0"], pred["keypoints1"]
     m0, scores0 = pred["matches0"], pred["matching_scores0"]
     pts0, pts1, scores = get_matches_scores(kp0, kp1, m0, scores0)
+    scores = scores.to(pts0)
     results = {}
     try:
-        Hdlt = kornia.geometry.homography.find_homography_dlt(
-            pts0[None], pts1[None], scores[None].to(pts0)
-        )[0]
+        if H_gt.ndim == 2:
+            pts0, pts1, scores = pts0[None], pts1[None], scores[None]
+        h_dlt = find_homography_dlt(pts0, pts1, scores)
+        if H_gt.ndim == 2:
+            h_dlt = h_dlt[0]
     except AssertionError:
-        Hdlt = H_inf
+        h_dlt = H_inf

-    error_dlt = homography_corner_error(Hdlt, H_gt, data["view0"]["image_size"])
+    error_dlt = homography_corner_error(h_dlt, H_gt, data["view0"]["image_size"])
     results["H_error_dlt"] = error_dlt.item()
     return results
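
With the ndim check in place, the same entry point now serves both batched and unbatched inputs: a (3, 3) homography is evaluated directly, while a (B, 3, 3) one is routed through eval_per_batch_item, which returns a dict of per-item lists. A small usage sketch, assuming the package is importable:

import torch

from gluefactory.eval.utils import eval_matches_homography

# Keypoints related by a pure translation H: [x, y] -> [x + 5, y - 3].
H = torch.tensor([[1.0, 0.0, 5.0], [0.0, 1.0, -3.0], [0.0, 0.0, 1.0]])
kps = torch.tensor([[10.0, 10.0], [20.0, 15.0], [30.0, 25.0]])
pred = {
    "keypoints0": kps,
    "keypoints1": kps + torch.tensor([5.0, -3.0]),
    "matches0": torch.arange(len(kps)),
    "matching_scores0": torch.ones(len(kps)),
}

# Unbatched: result values are scalars, e.g. {"prec@1px": 1.0, ...}.
print(eval_matches_homography({"H_0to1": H}, pred))

# Batched: result values become lists, one entry per batch item.
pred_b = {k: v[None] for k, v in pred.items()}
print(eval_matches_homography({"H_0to1": H[None]}, pred_b))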

gluefactory/geometry/epipolar.py

@@ -1,4 +1,3 @@
-import numpy as np
 import torch

 from .utils import skew_symmetric, to_homogeneous
@@ -124,39 +123,33 @@ def decompose_essential_matrix(E):

 # pose errors
-# TODO: port to torch and batch
+# TODO: test for batched data
 def angle_error_mat(R1, R2):
-    cos = (np.trace(np.dot(R1.T, R2)) - 1) / 2
-    cos = np.clip(cos, -1.0, 1.0)  # numercial errors can make it out of bounds
-    return np.rad2deg(np.abs(np.arccos(cos)))
+    cos = (torch.trace(torch.einsum("...ij, ...jk -> ...ik", R1.T, R2)) - 1) / 2
+    cos = torch.clip(cos, -1.0, 1.0)  # numerical errors can make it out of bounds
+    return torch.rad2deg(torch.abs(torch.arccos(cos)))


-def angle_error_vec(v1, v2):
-    n = np.linalg.norm(v1) * np.linalg.norm(v2)
-    return np.rad2deg(np.arccos(np.clip(np.dot(v1, v2) / n, -1.0, 1.0)))
+def angle_error_vec(v1, v2, eps=1e-10):
+    n = torch.clip(v1.norm(dim=-1) * v2.norm(dim=-1), min=eps)
+    v1v2 = (v1 * v2).sum(dim=-1)  # dot product in the last dimension
+    return torch.rad2deg(torch.arccos(torch.clip(v1v2 / n, -1.0, 1.0)))


-def compute_pose_error(T_0to1, R, t):
-    R_gt = T_0to1[:3, :3]
-    t_gt = T_0to1[:3, 3]
-    error_t = angle_error_vec(t, t_gt)
-    error_t = np.minimum(error_t, 180 - error_t)  # ambiguity of E estimation
-    error_R = angle_error_mat(R, R_gt)
-    return error_t, error_R
-
-
-def relative_pose_error(T_0to1, R, t, ignore_gt_t_thr=0.0):
+def relative_pose_error(T_0to1, R, t, ignore_gt_t_thr=0.0, eps=1e-10):
+    if isinstance(T_0to1, torch.Tensor):
+        R_gt, t_gt = T_0to1[:3, :3], T_0to1[:3, 3]
+    else:
+        R_gt, t_gt = T_0to1.R, T_0to1.t
+    R_gt, t_gt = torch.squeeze(R_gt), torch.squeeze(t_gt)
     # angle error between 2 vectors
-    R_gt, t_gt = T_0to1.numpy()
-    n = np.linalg.norm(t) * np.linalg.norm(t_gt)
-    t_err = np.rad2deg(np.arccos(np.clip(np.dot(t, t_gt) / n, -1.0, 1.0)))
-    t_err = np.minimum(t_err, 180 - t_err)  # handle E ambiguity
-    if np.linalg.norm(t_gt) < ignore_gt_t_thr:  # pure rotation is challenging
+    t_err = angle_error_vec(t, t_gt, eps)
+    t_err = torch.minimum(t_err, 180 - t_err)  # handle E ambiguity
+    if t_gt.norm() < ignore_gt_t_thr:  # pure rotation is challenging
         t_err = 0

     # angle error between 2 rotation matrices
-    cos = (np.trace(np.dot(R.T, R_gt)) - 1) / 2
-    cos = np.clip(cos, -1.0, 1.0)  # handle numercial errors
-    R_err = np.rad2deg(np.abs(np.arccos(cos)))
+    r_err = angle_error_mat(R, R_gt)

-    return t_err, R_err
+    return t_err, r_err
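
The ported functions keep the numpy semantics but operate on torch tensors, and relative_pose_error now accepts either a 4x4 pose tensor or a pose object exposing .R and .t (the isinstance check picks the right branch). A quick sanity check, assuming the package is importable:

import math

import torch

from gluefactory.geometry.epipolar import relative_pose_error

T_0to1 = torch.eye(4)
T_0to1[:3, 3] = torch.tensor([1.0, 0.0, 0.0])  # ground-truth pose

# An estimate that is exact in translation and 10 degrees off in rotation.
c, s = math.cos(math.radians(10.0)), math.sin(math.radians(10.0))
R_est = torch.tensor([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])
t_est = torch.tensor([1.0, 0.0, 0.0])

t_err, r_err = relative_pose_error(T_0to1, R_est, t_est)
print(t_err, r_err)  # ~0 and ~10 degrees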

gluefactory/geometry/homography.py

@@ -164,7 +164,8 @@ def warp_points_torch(points, H, inverse=True):
     The inverse is used to be coherent with tf.contrib.image.transform
     Arguments:
         points: batched list of N points, shape (B, N, 2).
-        homography: batched or not (shapes (B, 3, 3) and (3, 3) respectively).
+        H: batched or not (shapes (B, 3, 3) and (3, 3) respectively).
+        inverse: Whether to multiply the points by H or the inverse of H
     Returns: a Tensor of shape (B, N, 2) containing the new coordinates of the warps.
     """
@@ -333,7 +334,7 @@ def sym_homography_error_all(kpts0, kpts1, H):

 def homography_corner_error(T, T_gt, image_size):
-    W, H = image_size[:, 0], image_size[:, 1]
+    W, H = image_size[..., 0], image_size[..., 1]
     corners0 = torch.Tensor([[0, 0], [W, 0], [W, H], [0, H]]).float().to(T)
     corners1_gt = from_homogeneous(to_homogeneous(corners0) @ T_gt.transpose(-1, -2))
     corners1 = from_homogeneous(to_homogeneous(corners0) @ T.transpose(-1, -2))
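
The docstring fix and the [..., 0] indexing serve the same standardization goal: the geometry helpers should accept batched and unbatched inputs interchangeably. A minimal check of warp_points_torch in both modes, assuming the package is importable:

import torch

from gluefactory.geometry.homography import warp_points_torch

H = torch.tensor([[1.5, 0.2, 21.0], [-0.3, 1.6, 33.0], [0.0, 0.0, 1.0]])
pts = torch.tensor([[10.0, 10.0], [20.0, 10.0]])

warped = warp_points_torch(pts, H, inverse=False)  # (N, 2) with (3, 3)
warped_b = warp_points_torch(pts[None], H[None], inverse=False)  # batched
assert torch.allclose(warped, warped_b[0])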

gluefactory/geometry/utils.py

@@ -23,6 +23,7 @@ def from_homogeneous(points, eps=0.0):
     """Remove the homogeneous dimension of N-dimensional points.
     Args:
         points: torch.Tensor or numpy.ndarray with size (..., N+1).
+        eps: Epsilon value to prevent zero division.
     Returns:
         A torch.Tensor or numpy ndarray with size (..., N).
     """

gluefactory/models/matchers/gluestick.py

@@ -119,7 +119,7 @@ class GlueStick(BaseModel):
             "Loading GlueStick model from " f'"{self.url.format(conf.version)}"'
         )
         state_dict = torch.hub.load_state_dict_from_url(
-            self.url.format(conf.version), file_name=fname
+            self.url.format(conf.version), file_name=fname, map_location="cpu"
         )
         if "model" in state_dict:

gluefactory/models/matchers/lightglue_pretrained.py

@@ -17,7 +17,7 @@ class LightGlue(BaseModel):
     def _init(self, conf):
         dconf = OmegaConf.to_container(conf)
-        self.net = LightGlue_(dconf.pop("features"), **dconf).cuda()
+        self.net = LightGlue_(dconf.pop("features"), **dconf)
         self.set_initialized()

     def _forward(self, data):
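
Dropping the hard-coded .cuda() leaves device placement to the caller, which is what the integration test below does with the whole pipeline. A generic sketch of the pattern, with torch.nn.Linear standing in for the wrapped matcher:

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
model = torch.nn.Linear(4, 4)  # stand-in module
model = model.to(device).eval()  # move the model once, at the call site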

gluefactory/robust_estimators/homography/homography_est.py

@@ -7,6 +7,7 @@ from homography_est import (
     ransac_point_line_homography,
 )

+from ...utils.tensor import batch_to_numpy
 from ..base_estimator import BaseEstimator
@@ -50,19 +51,20 @@ class PointLineHomographyEstimator(BaseEstimator):
         pass

     def _forward(self, data):
-        m_features = {
-            "kpts0": data["m_kpts1"].numpy() if "m_kpts1" in data else None,
-            "kpts1": data["m_kpts0"].numpy() if "m_kpts0" in data else None,
-            "lines0": data["m_lines1"].numpy() if "m_lines1" in data else None,
-            "lines1": data["m_lines0"].numpy() if "m_lines0" in data else None,
-        }
         feat = data["m_kpts0"] if "m_kpts0" in data else data["m_lines0"]
+        data = batch_to_numpy(data)
+        m_features = {
+            "kpts0": data["m_kpts1"] if "m_kpts1" in data else None,
+            "kpts1": data["m_kpts0"] if "m_kpts0" in data else None,
+            "lines0": data["m_lines1"] if "m_lines1" in data else None,
+            "lines1": data["m_lines0"] if "m_lines0" in data else None,
+        }
         M = H_estimation_hybrid(**m_features, tol_px=self.conf.ransac_th)
         success = M is not None
         if not success:
             M = torch.eye(3, device=feat.device, dtype=feat.dtype)
         else:
-            M = torch.tensor(M).to(feat)
+            M = torch.from_numpy(M).to(feat)

         estimation = {
             "success": success,

gluefactory/robust_estimators/homography/poselib.py

@@ -16,8 +16,8 @@ class PoseLibHomographyEstimator(BaseEstimator):
     def _forward(self, data):
         pts0, pts1 = data["m_kpts0"], data["m_kpts1"]
         M, info = poselib.estimate_homography(
-            pts0.numpy(),
-            pts1.numpy(),
+            pts0.detach().cpu().numpy(),
+            pts1.detach().cpu().numpy(),
             {
                 "max_reproj_error": self.conf.ransac_th,
                 **OmegaConf.to_container(self.conf.options),
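
The detach().cpu() chain matters because Tensor.numpy() raises both for CUDA tensors and for tensors that still require grad. A minimal illustration:

import torch

pts = torch.rand(10, 2, requires_grad=True)
if torch.cuda.is_available():
    pts = pts.cuda()

# Plain pts.numpy() would raise here; detach().cpu() handles both the
# autograd and the device case before handing the data to poselib.
pts_np = pts.detach().cpu().numpy()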

gluefactory/utils/tensor.py

@@ -40,3 +40,9 @@ def rbd(data: dict) -> dict:
         k: v[0] if isinstance(v, (torch.Tensor, np.ndarray, list)) else v
         for k, v in data.items()
     }
+
+
+def index_batch(tensor_dict):
+    batch_size = len(next(iter(tensor_dict.values())))
+    for i in range(batch_size):
+        yield map_tensor(tensor_dict, lambda t: t[i])
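
index_batch is the inverse convenience of collation: it lazily yields one unbatched dict per batch element, which is what eval_per_batch_item iterates over. A usage sketch, assuming the package is importable:

import torch

from gluefactory.utils.tensor import index_batch

batch = {
    "keypoints0": torch.rand(2, 5, 2),  # (B, N, 2)
    "matching_scores0": torch.rand(2, 5),  # (B, N)
}

for item in index_batch(batch):
    # Each yielded dict has the leading batch dimension indexed away.
    assert item["keypoints0"].shape == (5, 2)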

pyproject.toml

@@ -38,12 +38,12 @@ urls = {Repository = "https://github.com/cvg/glue-factory"}

 [project.optional-dependencies]
 extra = [
     "pycolmap",
-    "poselib @ git+https://github.com/PoseLib/PoseLib.git",
-    "pytlsd @ git+https://github.com/iago-suarez/pytlsd.git",
+    "poselib @ git+https://github.com/PoseLib/PoseLib.git@9c8f3ca1baba69e19726cc7caded574873ec1f9e",
+    "pytlsd @ git+https://github.com/iago-suarez/pytlsd.git@v0.0.5",
     "deeplsd @ git+https://github.com/cvg/DeepLSD.git",
-    "homography_est @ git+https://github.com/rpautrat/homography_est.git",
+    "homography_est @ git+https://github.com/rpautrat/homography_est.git@17b200d528e6aa8ac61a878a29265bf5f9d36c41",
 ]
-dev = ["black", "flake8", "isort"]
+dev = ["black", "flake8", "isort", "parameterized"]

 [tool.setuptools.packages.find]
 include = ["gluefactory*"]

tests/__init__.py (new empty file)

tests/test_eval_utils.py (new file, +88 lines)

@@ -0,0 +1,88 @@
import unittest

import torch

from gluefactory.eval.utils import eval_matches_homography
from gluefactory.geometry.homography import warp_points_torch


class TestEvalUtils(unittest.TestCase):
@staticmethod
def default_pts():
return torch.tensor(
[
[10.0, 10.0],
[10.0, 20.0],
[20.0, 20.0],
[20.0, 10.0],
]
)

    @staticmethod
def default_pred(kps0, kps1):
return {
"keypoints0": kps0,
"keypoints1": kps1,
"matches0": torch.arange(len(kps0)),
"matching_scores0": torch.ones(len(kps1)),
}

    def test_eval_matches_homography_trivial(self):
data = {"H_0to1": torch.eye(3)}
kps = self.default_pts()
pred = self.default_pred(kps, kps)
results = eval_matches_homography(data, pred)
self.assertEqual(results["prec@1px"], 1)
self.assertEqual(results["prec@3px"], 1)
self.assertEqual(results["num_matches"], 4)
self.assertEqual(results["num_keypoints"], 4)

    def test_eval_matches_homography_real(self):
data = {"H_0to1": torch.tensor([[1.5, 0.2, 21], [-0.3, 1.6, 33], [0, 0, 1.0]])}
kps0 = self.default_pts()
kps1 = warp_points_torch(kps0, data["H_0to1"], inverse=False)
pred = self.default_pred(kps0, kps1)
results = eval_matches_homography(data, pred)
self.assertEqual(results["prec@1px"], 1)
self.assertEqual(results["prec@3px"], 1)

    def test_eval_matches_homography_real_outliers(self):
data = {"H_0to1": torch.tensor([[1.5, 0.2, 21], [-0.3, 1.6, 33], [0, 0, 1.0]])}
kps0 = self.default_pts()
kps0 = torch.cat([kps0, torch.tensor([[5.0, 5.0]])])
kps1 = warp_points_torch(kps0, data["H_0to1"], inverse=False)
# Move one keypoint 1.5 pixels away in x and y
kps1[-1] += 1.5
pred = self.default_pred(kps0, kps1)
results = eval_matches_homography(data, pred)
self.assertAlmostEqual(results["prec@1px"], 0.8)
self.assertAlmostEqual(results["prec@3px"], 1.0)

    def test_eval_matches_homography_batched(self):
H0 = torch.tensor([[1.5, 0.2, 21], [-0.3, 1.6, 33], [0, 0, 1.0]])
H1 = torch.tensor([[0.7, 0.1, -5], [-0.1, 0.65, 13], [0, 0, 1.0]])
data = {"H_0to1": torch.stack([H0, H1])}
kps0 = torch.stack([self.default_pts(), self.default_pts().flip(0)])
kps1 = warp_points_torch(kps0, data["H_0to1"], inverse=False)
# In the first element of the batch there is one outlier
kps1[0, -1] += 5
matches0 = torch.stack([torch.arange(4), torch.arange(4)])
# In the second element of the batch there is only 2 matches
matches0[1, :2] = -1
pred = {
"keypoints0": kps0,
"keypoints1": kps1,
"matches0": matches0,
"matching_scores0": torch.ones_like(matches0),
}
results = eval_matches_homography(data, pred)
self.assertAlmostEqual(results["prec@1px"][0], 0.75)
self.assertAlmostEqual(results["prec@1px"][1], 1.0)
self.assertAlmostEqual(results["num_matches"][0], 4)
self.assertAlmostEqual(results["num_matches"][1], 2)

tests/test_integration.py (new file, +132 lines)

@@ -0,0 +1,132 @@
import unittest
from collections import namedtuple
from os.path import splitext

import cv2
import matplotlib.pyplot as plt
import torch.cuda
from kornia import image_to_tensor
from omegaconf import OmegaConf
from parameterized import parameterized
from torch import Tensor

from gluefactory import logger
from gluefactory.eval.utils import (
eval_homography_dlt,
eval_homography_robust,
eval_matches_homography,
)
from gluefactory.models.two_view_pipeline import TwoViewPipeline
from gluefactory.settings import root
from gluefactory.utils.image import ImagePreprocessor
from gluefactory.utils.tensor import map_tensor
from gluefactory.utils.tools import set_seed
from gluefactory.visualization.viz2d import (
plot_color_line_matches,
plot_images,
plot_matches,
)


def create_input_data(cv_img0, cv_img1, device):
img0 = image_to_tensor(cv_img0).float() / 255
img1 = image_to_tensor(cv_img1).float() / 255
ip = ImagePreprocessor({})
data = {"view0": ip(img0), "view1": ip(img1)}
data = map_tensor(
data,
lambda t: t[None].to(device)
if isinstance(t, Tensor)
else torch.from_numpy(t)[None].to(device),
)
return data


ExpectedResults = namedtuple("ExpectedResults", ("num_matches", "prec3px", "h_error"))


class TestIntegration(unittest.TestCase):
methods_to_test = [
("superpoint+NN.yaml", "poselib", ExpectedResults(1300, 0.8, 1.0)),
("superpoint-open+NN.yaml", "poselib", ExpectedResults(1300, 0.8, 1.0)),
(
"superpoint+lsd+gluestick.yaml",
"homography_est",
ExpectedResults(1300, 0.8, 1.0),
),
(
"superpoint+lightglue-official.yaml",
"poselib",
ExpectedResults(1300, 0.8, 1.0),
),
]
visualize = False

    @parameterized.expand(methods_to_test)
@torch.no_grad()
def test_real_homography(self, conf_file, estimator, exp_results):
set_seed(0)
model_path = root / "gluefactory" / "configs" / conf_file
img_path0 = root / "assets" / "boat1.png"
img_path1 = root / "assets" / "boat2.png"
h_gt = torch.tensor(
[
[0.85799, 0.21669, 9.4839],
[-0.21177, 0.85855, 130.48],
[1.5015e-06, 9.2033e-07, 1],
]
)
device = "cuda" if torch.cuda.is_available() else "cpu"
gs = TwoViewPipeline(OmegaConf.load(model_path).model).to(device).eval()
cv_img0, cv_img1 = cv2.imread(str(img_path0)), cv2.imread(str(img_path1))
data = create_input_data(cv_img0, cv_img1, device)
pred = gs(data)
pred = map_tensor(
pred, lambda t: torch.squeeze(t, dim=0) if isinstance(t, Tensor) else t
)
data["H_0to1"] = h_gt.to(device)
data["H_1to0"] = torch.linalg.inv(h_gt).to(device)
results = eval_matches_homography(data, pred)
results = {**results, **eval_homography_dlt(data, pred)}
results = {
**results,
**eval_homography_robust(
data,
pred,
{"estimator": estimator},
),
}
logger.info(results)
self.assertGreater(results["num_matches"], exp_results.num_matches)
self.assertGreater(results["prec@3px"], exp_results.prec3px)
self.assertLess(results["H_error_ransac"], exp_results.h_error)
if self.visualize:
pred = map_tensor(
pred, lambda t: t.cpu().numpy() if isinstance(t, Tensor) else t
)
kp0, kp1 = pred["keypoints0"], pred["keypoints1"]
m0 = pred["matches0"]
valid0 = m0 != -1
kpm0, kpm1 = kp0[valid0], kp1[m0[valid0]]
plot_images([cv_img0, cv_img1])
plot_matches(kpm0, kpm1, a=0.0)
plt.savefig(f"{splitext(conf_file)[0]}_point_matches.svg")
if "lines0" in pred and "lines1" in pred:
lines0, lines1 = pred["lines0"], pred["lines1"]
lm0 = pred["line_matches0"]
lvalid0 = lm0 != -1
linem0, linem1 = lines0[lvalid0], lines1[lm0[lvalid0]]
plot_images([cv_img0, cv_img1])
plot_color_line_matches([linem0, linem1])
plt.savefig(f"{splitext(conf_file)[0]}_line_matches.svg")
plt.show()