SIFT+ALIKED updates (#26)

* fix links

* add sift/aliked eval configs

* add SIFT/ALIKED results on megadepth

* update sift config

* add SIFT with 4K keypoints results

* cleanup SIFT (see LightGlue)

* fix compatibility with LightGlue

* tiny visualization fix

* fix sift kornia

* Update sift configs
Branch: main
Author: Philipp Lindenberger, 2023-10-19 19:06:06 +02:00 (committed by GitHub)
Parent: aa7727675e
Commit: 0c75e76fd6
10 changed files with 260 additions and 207 deletions

View File

@@ -66,8 +66,8 @@ Here are the results as Area Under the Curve (AUC) of the homography error at 1/3/5 pixels:
| Methods | DLT | [OpenCV](../gluefactory/robust_estimators/homography/opencv.py) | [PoseLib](../gluefactory/robust_estimators/homography/poselib.py) |
| ------------------------------------------------------------ | ------------------ | ------------------ | ------------------ |
| [SuperPoint + SuperGlue](../gluefactory/configs/superpoint+superglue.yaml) | 32.1 / 65.0 / 75.7 | 32.9 / 55.7 / 68.0 | 37.0 / 68.2 / 78.7 |
| [SuperPoint + LightGlue](../gluefactory/configs/superpoint+lightglue.yaml) | 35.1 / 67.2 / 77.6 | 34.2 / 57.9 / 69.9 | 37.1 / 67.4 / 77.8 |
| [SuperPoint + SuperGlue](../gluefactory/configs/superpoint+superglue-official.yaml) | 32.1 / 65.0 / 75.7 | 32.9 / 55.7 / 68.0 | 37.0 / 68.2 / 78.7 |
| [SuperPoint + LightGlue](../gluefactory/configs/superpoint+lightglue-official.yaml) | 35.1 / 67.2 / 77.6 | 34.2 / 57.9 / 69.9 | 37.1 / 67.4 / 77.8 |
</details>
@@ -159,8 +159,11 @@ Here are the results as Area Under the Curve (AUC) of the pose error at 5/10/20°:
| Methods | [pycolmap](../gluefactory/robust_estimators/relative_pose/pycolmap.py) | [OpenCV](../gluefactory/robust_estimators/relative_pose/opencv.py) | [PoseLib](../gluefactory/robust_estimators/relative_pose/poselib.py) |
| ------------------------------------------------------------ | ------------------ | ------------------ | ------------------ |
| [SuperPoint + SuperGlue](../gluefactory/configs/superpoint+superglue.yaml) | 54.4 / 70.4 / 82.4 | 48.7 / 65.6 / 79.0 | 64.8 / 77.9 / 87.0 |
| [SuperPoint + LightGlue](../gluefactory/configs/superpoint+lightglue.yaml) | 56.7 / 72.4 / 83.7 | 51.0 / 68.1 / 80.7 | 66.8 / 79.3 / 87.9 |
| [SuperPoint + SuperGlue](../gluefactory/configs/superpoint+superglue-official.yaml) | 54.4 / 70.4 / 82.4 | 48.7 / 65.6 / 79.0 | 64.8 / 77.9 / 87.0 |
| [SuperPoint + LightGlue](../gluefactory/configs/superpoint+lightglue-official.yaml) | 56.7 / 72.4 / 83.7 | 51.0 / 68.1 / 80.7 | 66.8 / 79.3 / 87.9 |
| [SIFT (2K) + LightGlue](../gluefactory/configs/sift+lightglue-official.yaml) | ? / ? / ? | 43.5 / 61.5 / 75.9 | 60.4 / 74.3 / 84.5 |
| [SIFT (4K) + LightGlue](../gluefactory/configs/sift+lightglue-official.yaml) | ? / ? / ? | 49.9 / 67.3 / 80.3 | 65.9 / 78.6 / 87.4 |
| [ALIKED + LightGlue](../gluefactory/configs/aliked+lightglue-official.yaml) | ? / ? / ? | 51.5 / 68.1 / 80.4 | 66.3 / 78.7 / 87.5 |
| [SuperPoint + GlueStick](../gluefactory/configs/superpoint+lsd+gluestick.yaml) | 53.2 / 69.8 / 81.9 | 46.3 / 64.2 / 78.1 | 64.4 / 77.5 / 86.5 |
</details>
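
For context, the AUC numbers in both tables are obtained by integrating the cumulative error curve up to each threshold (1/3/5 px for homographies, 5/10/20° for poses). Below is a minimal sketch of that recipe, assuming the standard formulation popularized by the SuperGlue evaluation; it is not necessarily the exact code used by Glue Factory:

```python
import numpy as np

def error_auc(errors, thresholds=(5.0, 10.0, 20.0)):
    """AUC of the cumulative error curve at the given thresholds.

    Sort the per-pair errors, build the recall-vs-threshold curve,
    and integrate it up to each cutoff with the trapezoidal rule.
    """
    errors = np.sort(np.asarray(errors, dtype=float))
    recall = np.arange(1, len(errors) + 1) / len(errors)
    # Prepend (0, 0) so the curve starts at the origin.
    errors = np.concatenate([[0.0], errors])
    recall = np.concatenate([[0.0], recall])
    aucs = []
    for t in thresholds:
        last = np.searchsorted(errors, t)
        x = np.concatenate([errors[:last], [t]])
        y = np.concatenate([recall[:last], [recall[last - 1]]])
        aucs.append(np.trapz(y, x) / t)  # normalize so a perfect method scores 1
    return aucs
```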

View File

@@ -0,0 +1,28 @@
model:
  name: two_view_pipeline
  extractor:
    name: extractors.aliked
    max_num_keypoints: 2048
    detection_threshold: 0.0
  matcher:
    name: matchers.lightglue_pretrained
    features: aliked
    depth_confidence: -1
    width_confidence: -1
    filter_threshold: 0.1
benchmarks:
  megadepth1500:
    data:
      preprocessing:
        side: long
        resize: 1600
    eval:
      estimator: opencv
      ransac_th: 0.5
  hpatches:
    eval:
      estimator: opencv
      ransac_th: 0.5
    model:
      extractor:
        max_num_keypoints: 1024  # overwrite config above
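
The `# overwrite config above` comment relies on the per-benchmark `model` block being deep-merged over the top-level `model` config, so only the overridden leaf changes. A minimal sketch of the merge semantics with OmegaConf (assuming Glue Factory resolves benchmark overrides this way):

```python
from omegaconf import OmegaConf

base = OmegaConf.create(
    {"extractor": {"name": "extractors.aliked", "max_num_keypoints": 2048}}
)
override = OmegaConf.create({"extractor": {"max_num_keypoints": 1024}})
merged = OmegaConf.merge(base, override)

print(merged.extractor.max_num_keypoints)  # 1024 (overridden)
print(merged.extractor.name)               # extractors.aliked (kept from base)
```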

View File

@@ -0,0 +1,28 @@
model:
  name: two_view_pipeline
  extractor:
    name: extractors.sift
    backend: pycolmap_cuda
    max_num_keypoints: 4096
  matcher:
    name: matchers.lightglue_pretrained
    features: sift
    depth_confidence: -1
    width_confidence: -1
    filter_threshold: 0.1
benchmarks:
  megadepth1500:
    data:
      preprocessing:
        side: long
        resize: 1600
    eval:
      estimator: opencv
      ransac_th: 0.5
  hpatches:
    eval:
      estimator: opencv
      ransac_th: 0.5
    model:
      extractor:
        max_num_keypoints: 1024  # overwrite config above
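
Setting `depth_confidence` and `width_confidence` to -1 disables LightGlue's adaptive early exit and point pruning, so the full network always runs; this is slower but slightly more accurate, which is what you want for benchmarking. A rough standalone equivalent using the `lightglue` package (github.com/cvg/LightGlue), assuming a release that ships SIFT weights; the exact wrapper behind `matchers.lightglue_pretrained` may differ:

```python
from lightglue import LightGlue

matcher = LightGlue(
    features="sift",       # also: "superpoint", "aliked", ...
    depth_confidence=-1,   # -1 disables early exit after confident layers
    width_confidence=-1,   # -1 disables pruning of unmatchable points
    filter_threshold=0.1,  # minimum confidence for a match to be kept
).eval()
```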

View File

@@ -14,10 +14,10 @@ model:
  name: two_view_pipeline
  extractor:
    name: extractors.sift
    detector: pycolmap_cuda
    backend: pycolmap_cuda
    max_num_keypoints: 1024
    force_num_keypoints: True
    detection_threshold: 0.0001
    nms_radius: 3
    trainable: False
  ground_truth:
    name: matchers.homography_matcher
@@ -46,3 +46,6 @@ benchmarks:
    eval:
      estimator: opencv
      ransac_th: 0.5
    model:
      extractor:
        nms_radius: 0

View File

@@ -25,10 +25,10 @@ model:
  name: two_view_pipeline
  extractor:
    name: extractors.sift
    detector: pycolmap_cuda
    backend: pycolmap_cuda
    max_num_keypoints: 2048
    force_num_keypoints: True
    detection_threshold: 0.0001
    nms_radius: 3
    trainable: False
  matcher:
    name: matchers.lightglue
@@ -62,6 +62,9 @@ benchmarks:
      preprocessing:
        side: long
        resize: 1600
    model:
      extractor:
        nms_radius: 0
    eval:
      estimator: opencv
      ransac_th: 0.5
@@ -72,3 +75,4 @@ benchmarks:
    model:
      extractor:
        max_num_keypoints: 1024
        nms_radius: 0

View File

@@ -1,238 +1,233 @@
import warnings

import cv2
import numpy as np
import pycolmap
import torch
from omegaconf import OmegaConf
from scipy.spatial import KDTree
from kornia.color import rgb_to_grayscale
from packaging import version

try:
    import pycolmap
except ImportError:
    pycolmap = None

from ..base_model import BaseModel
from ..utils.misc import pad_to_length

EPS = 1e-6


def filter_dog_point(points, scales, angles, image_shape, nms_radius, scores=None):
    h, w = image_shape
    ij = np.round(points - 0.5).astype(int).T[::-1]
    # Remove duplicate points (identical coordinates).
    # Pick highest scale or score
    s = scales if scores is None else scores
    buffer = np.zeros((h, w))
    np.maximum.at(buffer, tuple(ij), s)
    keep = np.where(buffer[tuple(ij)] == s)[0]
    # Pick lowest angle (arbitrary).
    ij = ij[:, keep]
    buffer[:] = np.inf
    o_abs = np.abs(angles[keep])
    np.minimum.at(buffer, tuple(ij), o_abs)
    mask = buffer[tuple(ij)] == o_abs
    ij = ij[:, mask]
    keep = keep[mask]
    if nms_radius > 0:
        # Apply NMS on the remaining points
        buffer[:] = 0
        buffer[tuple(ij)] = s[keep]  # scores or scale
        local_max = torch.nn.functional.max_pool2d(
            torch.from_numpy(buffer).unsqueeze(0),
            kernel_size=nms_radius * 2 + 1,
            stride=1,
            padding=nms_radius,
        ).squeeze(0)
        is_local_max = buffer == local_max.numpy()
        keep = keep[is_local_max[tuple(ij)]]
    return keep
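
A tiny synthetic check of what the deduplication above does (not from the repo): two detections that round to the same pixel collapse to the higher-scoring one, while isolated points survive.

```python
import numpy as np

pts = np.array([[4.3, 2.1], [4.4, 2.2], [10.0, 10.0]], dtype=np.float32)
scales = np.array([1.0, 2.0, 1.5], dtype=np.float32)
angles = np.array([0.1, -0.2, 0.0], dtype=np.float32)
scores = np.array([0.5, 0.9, 0.7], dtype=np.float32)

keep = filter_dog_point(pts, scales, angles, (32, 32), nms_radius=0, scores=scores)
print(keep)  # [1 2]: the lower-scoring duplicate at pixel (4, 2) is dropped
```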
def sift_to_rootsift(x):
    x = x / (np.linalg.norm(x, ord=1, axis=-1, keepdims=True) + EPS)
    x = np.sqrt(x.clip(min=EPS))
    x = x / (np.linalg.norm(x, axis=-1, keepdims=True) + EPS)
    return x


def sift_to_rootsift(x: torch.Tensor, eps=1e-6) -> torch.Tensor:
    x = torch.nn.functional.normalize(x, p=1, dim=-1, eps=eps)
    x.clip_(min=eps).sqrt_()
    return torch.nn.functional.normalize(x, p=2, dim=-1, eps=eps)
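
This is the RootSIFT transform (Arandjelović & Zisserman, CVPR 2012): L1-normalize, take the element-wise square root, then L2-normalize, so that Euclidean distance between descriptors approximates the Hellinger kernel. A quick sanity check of the new torch version on synthetic descriptors:

```python
import torch

x = torch.rand(4, 128)  # fake non-negative SIFT descriptors
r = sift_to_rootsift(x)
print(torch.allclose(r.norm(p=2, dim=-1), torch.ones(4)))  # True: unit L2 norm
```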
# from OpenGlue
def nms_keypoints(kpts: np.ndarray, responses: np.ndarray, radius: float) -> np.ndarray:
    # TODO: add approximate tree
    kd_tree = KDTree(kpts)
    sorted_idx = np.argsort(-responses)
    kpts_to_keep_idx = []
    removed_idx = set()
    for idx in sorted_idx:
        # skip point if it was already removed
        if idx in removed_idx:
            continue
        kpts_to_keep_idx.append(idx)
        point = kpts[idx]
        neighbors = kd_tree.query_ball_point(point, r=radius)
        # Variable `neighbors` contains the `point` itself
        removed_idx.update(neighbors)
    mask = np.zeros((kpts.shape[0],), dtype=bool)
    mask[kpts_to_keep_idx] = True
    return mask


def detect_kpts_opencv(
    features: cv2.Feature2D, image: np.ndarray, describe: bool = True
) -> np.ndarray:
def run_opencv_sift(features: cv2.Feature2D, image: np.ndarray) -> np.ndarray:
    """
    Detect keypoints using OpenCV Detector.
    Optionally, perform NMS and filter top-response keypoints.
    Optionally, perform description.
    Args:
        features: OpenCV based keypoints detector and descriptor
        image: Grayscale image of uint8 data type
        describe: flag indicating whether to simultaneously compute descriptors
    Returns:
        kpts: 1D array of detected cv2.KeyPoint
        keypoints: 1D array of detected cv2.KeyPoint
        scores: 1D array of responses
        descriptors: 1D array of descriptors
    """
    if describe:
        kpts, descriptors = features.detectAndCompute(image, None)
    else:
        kpts = features.detect(image, None)
    kpts = np.array(kpts)
    responses = np.array([k.response for k in kpts], dtype=np.float32)
    # select all
    top_score_idx = ...
    pts = np.array([k.pt for k in kpts], dtype=np.float32)
    scales = np.array([k.size for k in kpts], dtype=np.float32)
    angles = np.array([k.angle for k in kpts], dtype=np.float32)
    spts = np.concatenate([pts, scales[..., None], angles[..., None]], -1)
    if describe:
        return spts[top_score_idx], responses[top_score_idx], descriptors[top_score_idx]
    else:
        return spts[top_score_idx], responses[top_score_idx]
    detections, descriptors = features.detectAndCompute(image, None)
    points = np.array([k.pt for k in detections], dtype=np.float32)
    scores = np.array([k.response for k in detections], dtype=np.float32)
    scales = np.array([k.size for k in detections], dtype=np.float32)
    angles = np.deg2rad(np.array([k.angle for k in detections], dtype=np.float32))
    return points, scores, scales, angles, descriptors
class SIFT(BaseModel):
    default_conf = {
        "has_detector": True,
        "has_descriptor": True,
        "descriptor_dim": 128,
        "pycolmap_options": {
            "first_octave": 0,
            "peak_threshold": 0.005,
            "edge_threshold": 10,
        },
        "rootsift": True,
        "nms_radius": None,
        "max_num_keypoints": -1,
        "max_num_keypoints_val": None,
        "nms_radius": 0,  # None to disable filtering entirely.
        "max_num_keypoints": 4096,
        "backend": "opencv",  # in {opencv, pycolmap, pycolmap_cpu, pycolmap_cuda}
        "detection_threshold": 0.0066667,  # from COLMAP
        "edge_threshold": 10,
        "first_octave": -1,  # only used by pycolmap, the default of COLMAP
        "num_octaves": 4,
        "force_num_keypoints": False,
        "randomize_keypoints_training": False,
        "detector": "pycolmap",  # ['pycolmap', 'pycolmap_cpu', 'pycolmap_cuda', 'cv2']
        "detection_threshold": None,
    }

    required_data_keys = ["image"]

    def _init(self, conf):
        self.sift = None  # lazy loading

    @torch.no_grad()
    def extract_features(self, image):
        image_np = image.cpu().numpy()[0]
        assert image.shape[0] == 1
        assert image_np.min() >= -EPS and image_np.max() <= 1 + EPS
        detector = str(self.conf.detector)
        if self.sift is None and detector.startswith("pycolmap"):
            options = OmegaConf.to_container(self.conf.pycolmap_options)
        backend = self.conf.backend
        if backend.startswith("pycolmap"):
            if pycolmap is None:
                raise ImportError(
                    "Cannot find module pycolmap: install it with pip "
                    "or use backend=opencv."
                )
            options = {
                "peak_threshold": self.conf.detection_threshold,
                "edge_threshold": self.conf.edge_threshold,
                "first_octave": self.conf.first_octave,
                "num_octaves": self.conf.num_octaves,
                "normalization": pycolmap.Normalization.L2,  # L1_ROOT is buggy.
            }
            device = (
                "auto" if detector == "pycolmap" else detector.replace("pycolmap_", "")
                "auto" if backend == "pycolmap" else backend.replace("pycolmap_", "")
            )
            if self.conf.rootsift == "rootsift":
                options["normalization"] = pycolmap.Normalization.L1_ROOT
            if (
                backend == "pycolmap_cpu" or not pycolmap.has_cuda
            ) and pycolmap.__version__ < "0.5.0":
                warnings.warn(
                    "The pycolmap CPU SIFT is buggy in version < 0.5.0, "
                    "consider upgrading pycolmap or use the CUDA version.",
                    stacklevel=1,
                )
            else:
                options["normalization"] = pycolmap.Normalization.L2
            if self.conf.detection_threshold is not None:
                options["peak_threshold"] = self.conf.detection_threshold
            options["max_num_features"] = self.conf.max_num_keypoints
            options["max_num_features"] = self.conf.max_num_keypoints
            self.sift = pycolmap.Sift(options=options, device=device)
        elif self.sift is None and self.conf.detector == "cv2":
            self.sift = cv2.SIFT_create(contrastThreshold=self.conf.detection_threshold)
        elif backend == "opencv":
            self.sift = cv2.SIFT_create(
                contrastThreshold=self.conf.detection_threshold,
                nfeatures=self.conf.max_num_keypoints,
                edgeThreshold=self.conf.edge_threshold,
                nOctaveLayers=self.conf.num_octaves,
            )
        else:
            backends = {"opencv", "pycolmap", "pycolmap_cpu", "pycolmap_cuda"}
            raise ValueError(
                f"Unknown backend: {backend} not in " f"{{{','.join(backends)}}}."
            )
        if detector.startswith("pycolmap"):
            keypoints, scores, descriptors = self.sift.extract(image_np)
        elif detector == "cv2":

    def extract_single_image(self, image: torch.Tensor):
        image_np = image.cpu().numpy().squeeze(0)
        if self.conf.backend.startswith("pycolmap"):
            if version.parse(pycolmap.__version__) >= version.parse("0.5.0"):
                detections, descriptors = self.sift.extract(image_np)
                scores = None  # Scores are not exposed by COLMAP anymore.
            else:
                detections, scores, descriptors = self.sift.extract(image_np)
            keypoints = detections[:, :2]  # Keep only (x, y).
            scales, angles = detections[:, -2:].T
            if scores is not None and (
                self.conf.backend == "pycolmap_cpu" or not pycolmap.has_cuda
            ):
                # Set the scores as a combination of abs. response and scale.
                scores = np.abs(scores) * scales
        elif self.conf.backend == "opencv":
            # TODO: Check if opencv keypoints are already in corner convention
            keypoints, scores, descriptors = detect_kpts_opencv(
            keypoints, scores, scales, angles, descriptors = run_opencv_sift(
                self.sift, (image_np * 255.0).astype(np.uint8)
            )
        pred = {
            "keypoints": keypoints,
            "scales": scales,
            "oris": angles,
            "descriptors": descriptors,
        }
        if scores is not None:
            pred["keypoint_scores"] = scores

        # sometimes pycolmap returns points outside the image. We remove them
        if self.conf.backend.startswith("pycolmap"):
            is_inside = (
                pred["keypoints"] + 0.5 < np.array([image_np.shape[-2:][::-1]])
            ).all(-1)
            pred = {k: v[is_inside] for k, v in pred.items()}

        if self.conf.nms_radius is not None:
            mask = nms_keypoints(keypoints[:, :2], scores, self.conf.nms_radius)
            keypoints = keypoints[mask]
            scores = scores[mask]
            descriptors = descriptors[mask]
            keep = filter_dog_point(
                pred["keypoints"],
                pred["scales"],
                pred["oris"],
                image_np.shape,
                self.conf.nms_radius,
                pred["keypoint_scores"],
            )
            pred = {k: v[keep] for k, v in pred.items()}

        scales = keypoints[:, 2]
        oris = np.rad2deg(keypoints[:, 3])
        if self.conf.has_descriptor:
            # We still renormalize because COLMAP does not normalize well,
            # maybe due to numerical errors
            if self.conf.rootsift:
                descriptors = sift_to_rootsift(descriptors)
            descriptors = torch.from_numpy(descriptors)
        keypoints = torch.from_numpy(keypoints[:, :2])  # keep only x, y
        scales = torch.from_numpy(scales)
        oris = torch.from_numpy(oris)
        scores = torch.from_numpy(scores)

        # Keep the k keypoints with highest score
        max_kps = self.conf.max_num_keypoints
        # for val we allow different
        if not self.training and self.conf.max_num_keypoints_val is not None:
            max_kps = self.conf.max_num_keypoints_val
        if max_kps is not None and max_kps > 0:
            if self.conf.randomize_keypoints_training and self.training:
                # instead of selecting top-k, sample k by score weights
                raise NotImplementedError
            elif max_kps < scores.shape[0]:
                # TODO: check that the scores from PyCOLMAP are 100% correct,
                # follow https://github.com/mihaidusmanu/pycolmap/issues/8
                indices = torch.topk(scores, max_kps).indices
                keypoints = keypoints[indices]
                scales = scales[indices]
                oris = oris[indices]
                scores = scores[indices]
                if self.conf.has_descriptor:
                    descriptors = descriptors[indices]
        pred = {k: torch.from_numpy(v) for k, v in pred.items()}
        if scores is not None:
            # Keep the k keypoints with highest score
            num_points = self.conf.max_num_keypoints
            if num_points is not None and len(pred["keypoints"]) > num_points:
                indices = torch.topk(pred["keypoint_scores"], num_points).indices
                pred = {k: v[indices] for k, v in pred.items()}

        if self.conf.force_num_keypoints:
            keypoints = pad_to_length(
                keypoints,
                max_kps,
            num_points = min(self.conf.max_num_keypoints, len(pred["keypoints"]))
            pred["keypoints"] = pad_to_length(
                pred["keypoints"],
                num_points,
                -2,
                mode="random_c",
                bounds=(0, min(image.shape[1:])),
            )
            scores = pad_to_length(scores, max_kps, -1, mode="zeros")
            scales = pad_to_length(scales, max_kps, -1, mode="zeros")
            oris = pad_to_length(oris, max_kps, -1, mode="zeros")
            if self.conf.has_descriptor:
                descriptors = pad_to_length(descriptors, max_kps, -2, mode="zeros")
        pred = {
            "keypoints": keypoints,
            "scales": scales,
            "oris": oris,
            "keypoint_scores": scores,
        }
        if self.conf.has_descriptor:
            pred["descriptors"] = descriptors
            pred["scales"] = pad_to_length(pred["scales"], num_points, -1, mode="zeros")
            pred["oris"] = pad_to_length(pred["oris"], num_points, -1, mode="zeros")
            pred["descriptors"] = pad_to_length(
                pred["descriptors"], num_points, -2, mode="zeros"
            )
            if pred["keypoint_scores"] is not None:
                scores = pad_to_length(
                    pred["keypoint_scores"], num_points, -1, mode="zeros"
                )
        return pred

    @torch.no_grad()
    def _forward(self, data):
        pred = {
            "keypoints": [],
            "scales": [],
            "oris": [],
            "keypoint_scores": [],
            "descriptors": [],
        }
    def _forward(self, data: dict) -> dict:
        image = data["image"]
        if image.shape[1] == 3:  # RGB
            scale = image.new_tensor([0.299, 0.587, 0.114]).view(1, 3, 1, 1)
            image = (image * scale).sum(1, keepdim=True).cpu()
        for k in range(image.shape[0]):
        if image.shape[1] == 3:
            image = rgb_to_grayscale(image)
        device = image.device
        image = image.cpu()
        pred = []
        for k in range(len(image)):
            img = image[k]
            if "image_size" in data.keys():
                # avoid extracting points in padded areas
                w, h = data["image_size"][k]
                img = img[:, :h, :w]
            p = self.extract_features(img)
            for k, v in p.items():
                pred[k].append(v)
        if (image.shape[0] == 1) or self.conf.force_num_keypoints:
            pred = {k: torch.stack(pred[k], 0) for k in pred.keys()}
        pred = {k: pred[k].to(device=data["image"].device) for k in pred.keys()}
        pred["oris"] = torch.deg2rad(pred["oris"])
            p = self.extract_single_image(img)
            pred.append(p)
        pred = {k: torch.stack([p[k] for p in pred], 0).to(device) for k in pred[0]}
        if self.conf.rootsift:
            pred["descriptors"] = sift_to_rootsift(pred["descriptors"])
        return pred

    def loss(self, pred, data):
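
A minimal usage sketch of the refactored extractor, assuming the Glue Factory model conventions visible in this diff (dict-in/dict-out modules built from a conf dict); this is untested pseudo-usage rather than repo code:

```python
import torch

from gluefactory.models.extractors.sift import SIFT

sift = SIFT({"backend": "opencv", "max_num_keypoints": 2048, "nms_radius": 0}).eval()
image = torch.rand(1, 1, 480, 640)  # (B, C, H, W), grayscale in [0, 1]
pred = sift({"image": image})

print(pred["keypoints"].shape)    # (1, N, 2), corner convention
print(pred["descriptors"].shape)  # (1, N, 128), RootSIFT-normalized by default
print(pred["oris"].shape)         # (1, N), orientations in radians
```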

View File

@@ -24,8 +24,8 @@ class KorniaSIFT(BaseModel):
    def _forward(self, data):
        lafs, scores, descriptors = self.sift(data["image"])
        keypoints = kornia.feature.get_laf_center(lafs)
        scales = kornia.feature.get_laf_scale(lafs)
        oris = kornia.feature.get_laf_orientation(lafs)
        scales = kornia.feature.get_laf_scale(lafs).squeeze(-1).squeeze(-1)
        oris = kornia.feature.get_laf_orientation(lafs).squeeze(-1)
        pred = {
            "keypoints": keypoints,  # @TODO: confirm keypoints are in corner convention
            "scales": scales,

View File

@@ -21,13 +21,14 @@ class LightGlue(BaseModel):
        self.set_initialized()

    def _forward(self, data):
        required_keys = ["keypoints", "descriptors", "scales", "oris"]
        view0 = {
            **{k: data[k + "0"] for k in ["keypoints", "descriptors"]},
            **data["view0"],
            **{k: data[k + "0"] for k in required_keys if (k + "0") in data},
        }
        view1 = {
            **{k: data[k + "1"] for k in ["keypoints", "descriptors"]},
            **data["view1"],
            **{k: data[k + "1"] for k in required_keys if (k + "1") in data},
        }
        return self.net({"image0": view0, "image1": view1})
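
A toy illustration of the key routing above (shapes arbitrary, not repo code): flat `<key>0`/`<key>1` entries are folded into per-view dicts, and SIFT-only extras like `scales`/`oris` are included only when present, which is what keeps the wrapper compatible with both SIFT and learned extractors:

```python
import torch

data = {
    "view0": {"image_size": torch.tensor([[640, 480]])},
    "keypoints0": torch.rand(1, 100, 2),
    "descriptors0": torch.rand(1, 100, 128),
    "scales0": torch.rand(1, 100),  # present for SIFT, absent for e.g. ALIKED
    "oris0": torch.rand(1, 100),
}
required_keys = ["keypoints", "descriptors", "scales", "oris"]
view0 = {
    **data["view0"],
    **{k: data[k + "0"] for k in required_keys if (k + "0") in data},
}
print(sorted(view0))  # ['descriptors', 'image_size', 'keypoints', 'oris', 'scales']
```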

View File

@@ -37,14 +37,13 @@ configs = {
        },
    },
    "cv2-sift": {
        "name": f"r{resize}_cv2-SIFT-k{n_kpts}",
        "name": f"r{resize}_opencv-SIFT-k{n_kpts}",
        "keys": ["keypoints", "descriptors", "keypoint_scores", "oris", "scales"],
        "gray": True,
        "conf": {
            "name": "extractors.sift",
            "max_num_keypoints": 4096,
            "detection_threshold": 0.001,
            "detector": "cv2",
            "backend": "opencv",
        },
    },
    "pycolmap-sift": {
@@ -54,11 +53,7 @@ configs = {
        "conf": {
            "name": "extractors.sift",
            "max_num_keypoints": n_kpts,
            "detection_threshold": 0.0001,
            "detector": "pycolmap",
            "pycolmap_options": {
                "first_octave": -1,
            },
            "backend": "pycolmap",
        },
    },
    "pycolmap-sift-gpu": {
@@ -68,11 +63,7 @@ configs = {
        "conf": {
            "name": "extractors.sift",
            "max_num_keypoints": n_kpts,
            "detection_threshold": 0.0066666,
            "detector": "pycolmap_cuda",
            "pycolmap_options": {
                "first_octave": -1,
            },
            "backend": "pycolmap_cuda",
            "nms_radius": 3,
        },
    },

View File

@@ -208,14 +208,14 @@ def plot_matches(kpts0, kpts1, color=None, lw=1.5, ps=4, a=1.0, labels=None, axes=None):
        kpts0[:, 1],
        c=color,
        s=ps,
        label=None if labels is None else labels[0],
        label=None if labels is None or len(labels) == 0 else labels[0],
    )
    ax1.scatter(
        kpts1[:, 0],
        kpts1[:, 1],
        c=color,
        s=ps,
        label=None if labels is None else labels[1],
        label=None if labels is None or len(labels) == 0 else labels[1],
    )