89 changes: 89 additions & 0 deletions meshroom/aliceVision/GeometricFilterApplying.py
@@ -0,0 +1,89 @@
__version__ = "1.0"

from meshroom.core import desc
from meshroom.core.utils import DESCRIBER_TYPES, VERBOSE_LEVEL


class GeometricFilterApplying(desc.AVCommandLineNode):
commandLine = 'aliceVision_geometricFilterApplying {allParams}'
size = desc.DynamicNodeSize('input')
parallelization = desc.Parallelization(blockSize=20)
commandLineRange = '--rangeIteration {rangeIteration} --rangeBlocksCount {rangeBlocksCount}'

category = 'Sparse Reconstruction'
documentation = '''
Apply precomputed transforms to the putative matches in order to keep only geometrically consistent matches.
'''

inputs = [
desc.File(
name="input",
label="SfMData",
description="Input SfMData file.",
value="",
),
desc.ListAttribute(
elementDesc=desc.File(
name="featuresFolder",
label="Features Folder",
description="Folder containing some extracted features and descriptors.",
value="",
),
name="featuresFolders",
label="Features Folders",
description="Folder(s) containing the extracted features and descriptors.",
exposed=True,
),
desc.ListAttribute(
elementDesc=desc.File(
name="matchesFolder",
label="Matches Folder",
description="Folder containing some matches.",
value="",
),
name="matchesFolders",
label="Matches Folders",
description="Folder(s) in which computed matches are stored.",
exposed=True,
),
desc.File(
name="filters",
label="Filters Folder",
description="Path to a folder in which the computed filters are stored.",
value="",
exposed=True,
),
desc.ChoiceParam(
name="describerTypes",
label="Describer Types",
description="Describer types used to describe an image.",
values=DESCRIBER_TYPES,
value=["dspsift"],
exclusive=False,
joinChar=",",
exposed=True,
),
desc.IntParam(
name="maxMatches",
label="Max Matches",
description="Maximum number of matches to keep.",
value=0,
range=(0, 10000, 1),
advanced=True,
),
desc.ChoiceParam(
name="verboseLevel",
label="Verbose Level",
description="Verbosity level (fatal, error, warning, info, debug, trace).",
values=VERBOSE_LEVEL,
value="info",
),
]
outputs = [
desc.File(
name="output",
label="Matches Folder",
description="Path to a folder in which the computed matches are stored.",
value="{nodeCacheFolder}",
),
]
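
As context for the node above: GeometricFilterApplying consumes the filters produced by GeometricFilterEstimating and re-applies them to the putative matches. The sketch below illustrates what applying a precomputed transform can look like for a fundamental matrix F, keeping only the matches whose point-to-epipolar-line distance stays under a pixel threshold. The names (Mat3, Match, applyPrecomputedFilter) and the use of a plain 3x3 array are assumptions for this example, not the aliceVision API.

```cpp
#include <array>
#include <cmath>
#include <vector>

using Mat3 = std::array<std::array<double, 3>, 3>;  // row-major 3x3 matrix (illustrative)

struct Match { double xA, yA, xB, yB; };  // pixel coordinates of a correspondence (image A -> image B)

// Epipolar line in image B associated with point (x, y) of image A: l = F * [x, y, 1]^T.
static std::array<double, 3> epipolarLine(const Mat3& F, double x, double y)
{
    return { F[0][0] * x + F[0][1] * y + F[0][2],
             F[1][0] * x + F[1][1] * y + F[1][2],
             F[2][0] * x + F[2][1] * y + F[2][2] };
}

// Keep only the matches consistent with a stored fundamental matrix F.
std::vector<Match> applyPrecomputedFilter(const Mat3& F,
                                          const std::vector<Match>& putative,
                                          double maxErrorPx)
{
    std::vector<Match> kept;
    for (const Match& m : putative)
    {
        const auto l = epipolarLine(F, m.xA, m.yA);
        // Distance from the matched point in image B to its epipolar line.
        const double dist = std::abs(l[0] * m.xB + l[1] * m.yB + l[2])
                          / std::sqrt(l[0] * l[0] + l[1] * l[1]);
        if (dist < maxErrorPx)
            kept.push_back(m);  // match agrees with the precomputed model
    }
    return kept;
}
```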
104 changes: 104 additions & 0 deletions meshroom/aliceVision/GeometricFilterEstimating.py
@@ -0,0 +1,104 @@
__version__ = "1.0"

from meshroom.core import desc
from meshroom.core.utils import DESCRIBER_TYPES, VERBOSE_LEVEL


class GeometricFilterEstimating(desc.AVCommandLineNode):
commandLine = 'aliceVision_geometricFilterEstimating {allParams}'
size = desc.DynamicNodeSize('input')
parallelization = desc.Parallelization(blockSize=20)
commandLineRange = '--rangeIteration {rangeIteration} --rangeBlocksCount {rangeBlocksCount}'

category = 'Sparse Reconstruction'
documentation = '''
Performs geometric filtering of the photometric match candidates.
It uses the feature positions in the images to filter matches geometrically, using epipolar geometry within an outlier detection framework
called RANSAC (RANdom SAmple Consensus). It randomly selects a small set of feature correspondences, computes the fundamental (or essential) matrix,
then checks how many features validate this model, and iterates through the RANSAC framework.

## Online
[https://alicevision.org/#photogrammetry/feature_matching](https://alicevision.org/#photogrammetry/feature_matching)
'''

inputs = [
desc.File(
name="input",
label="SfMData",
description="Input SfMData file.",
value="",
),
desc.ListAttribute(
elementDesc=desc.File(
name="featuresFolder",
label="Features Folder",
description="Folder containing some extracted features and descriptors.",
value="",
),
name="featuresFolders",
label="Features Folders",
description="Folder(s) containing the extracted features and descriptors.",
exposed=True,
),
desc.ListAttribute(
elementDesc=desc.File(
name="matchesFolder",
label="Matches Folder",
description="Folder containing some matches.",
value="",
),
name="matchesFolders",
label="Matches Folders",
description="Folder(s) in which computed matches are stored.",
exposed=True,
),
desc.ChoiceParam(
name="describerTypes",
label="Describer Types",
description="Describer types used to describe an image.",
values=DESCRIBER_TYPES,
value=["dspsift"],
exclusive=False,
joinChar=",",
exposed=True,
),
desc.IntParam(
name="maxIteration",
label="Max Iterations",
description="Maximum number of iterations allowed in the Ransac step.",
value=50000,
range=(1, 100000, 1),
advanced=True,
),
desc.FloatParam(
name="geometricError",
label="Geometric Validation Error",
description="Maximum error (in pixels) allowed for features matching during geometric verification",
value=0.0,
range=(0.0, 10.0, 0.1),
advanced=True,
),
desc.IntParam(
name="maxMatches",
label="Max Matches",
description="Maximum number of matches to keep.",
value=0,
range=(0, 10000, 1),
advanced=True,
),
desc.ChoiceParam(
name="verboseLevel",
label="Verbose Level",
description="Verbosity level (fatal, error, warning, info, debug, trace).",
values=VERBOSE_LEVEL,
value="info",
),
]
outputs = [
desc.File(
name="output",
label="Filters Folder",
description="Path to a folder in which the computed filters are stored.",
value="{nodeCacheFolder}",
),
]
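
The documentation block above describes the RANSAC loop: randomly sample a minimal set of correspondences, compute a fundamental (or essential) matrix from it, count the matches that validate the model, and iterate. Below is a minimal, self-contained sketch of that loop with the matrix solver and residual left abstract (aliceVision uses its own multiview solvers for those steps); every name here (Correspondence, RansacResult, ransac) is illustrative only, not the aliceVision API.

```cpp
#include <algorithm>
#include <cstddef>
#include <functional>
#include <numeric>
#include <random>
#include <vector>

struct Correspondence { double xA, yA, xB, yB; };  // a putative feature match between images A and B

// Model must be default-constructible for this sketch.
template <typename Model>
struct RansacResult
{
    Model model{};
    std::vector<std::size_t> inlierIndices;
};

template <typename Model>
RansacResult<Model> ransac(const std::vector<Correspondence>& matches,
                           std::size_t minimalSampleSize,  // e.g. 7 or 8 points for a fundamental matrix
                           const std::function<Model(const std::vector<Correspondence>&)>& fitModel,
                           const std::function<double(const Model&, const Correspondence&)>& residual,
                           double maxErrorPx,   // maximum residual (pixels) for a match to count as an inlier
                           int maxIterations,
                           std::mt19937& rng)
{
    RansacResult<Model> best;
    if (matches.size() < minimalSampleSize)
        return best;

    std::vector<std::size_t> indices(matches.size());
    std::iota(indices.begin(), indices.end(), std::size_t(0));

    for (int it = 0; it < maxIterations; ++it)
    {
        // 1. Randomly select a minimal sample of correspondences.
        std::shuffle(indices.begin(), indices.end(), rng);
        std::vector<Correspondence> sample;
        sample.reserve(minimalSampleSize);
        for (std::size_t k = 0; k < minimalSampleSize; ++k)
            sample.push_back(matches[indices[k]]);

        // 2. Fit the model (e.g. a fundamental matrix) to the sample.
        const Model candidate = fitModel(sample);

        // 3. Count the correspondences that validate the candidate model.
        std::vector<std::size_t> inliers;
        for (std::size_t i = 0; i < matches.size(); ++i)
            if (residual(candidate, matches[i]) < maxErrorPx)
                inliers.push_back(i);

        // 4. Keep the candidate with the largest consensus set.
        if (inliers.size() > best.inlierIndices.size())
        {
            best.model = candidate;
            best.inlierIndices = std::move(inliers);
        }
    }
    return best;
}
```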
5 changes: 3 additions & 2 deletions meshroom/aliceVision/ImageMatching.py
@@ -63,9 +63,10 @@ class ImageMatching(desc.AVCommandLineNode):
" - SequentialAndVocabularyTree: Combines sequential approach with VocTree to enable connections between keyframes at different times.\n"
" - Exhaustive: Export all image pairs.\n"
" - Frustum: If images have known poses, computes the intersection between cameras frustums to create the list of image pairs.\n"
" - FrustumOrVocabularyTree: If images have known poses, use frustum intersection else use VocabularyTree.\n",
" - FrustumOrVocabularyTree: If images have known poses, use frustum intersection else use VocabularyTree.\n"
" - Mirror: Try to match images with themselves. \n",
value="SequentialAndVocabularyTree",
values=["VocabularyTree", "Sequential", "SequentialAndVocabularyTree", "Exhaustive", "Frustum", "FrustumOrVocabularyTree"],
values=["VocabularyTree", "Sequential", "SequentialAndVocabularyTree", "Exhaustive", "Frustum", "FrustumOrVocabularyTree", "Mirror"],
),
desc.File(
name="tree",
9 changes: 9 additions & 0 deletions src/aliceVision/feature/feature.i
@@ -9,6 +9,15 @@
%include <aliceVision/config.hpp>
%include <aliceVision/global.i>

namespace std
{
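// Tell SWIG's parser how std::size_t is defined; the underlying integer type differs
// between platforms (LINUXPLATFORM is assumed to be provided by the build system).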
#ifdef LINUXPLATFORM
typedef long unsigned int size_t;
#else
typedef long unsigned long size_t;
#endif
}

%{
#include <aliceVision/feature/Regions.hpp>
#include <aliceVision/feature/imageDescriberCommon.hpp>
12 changes: 12 additions & 0 deletions src/aliceVision/imageMatching/ImageMatching.cpp
@@ -40,6 +40,8 @@ std::string EImageMatchingMethod_enumToString(EImageMatchingMethod m)
return "Frustum";
case EImageMatchingMethod::FRUSTUM_OR_VOCABULARYTREE:
return "FrustumOrVocabularyTree";
case EImageMatchingMethod::MIRROR:
return "Mirror";
}
throw std::out_of_range("Invalid EImageMatchingMethod enum: " + std::to_string(int(m)));
}
@@ -61,6 +63,8 @@ EImageMatchingMethod EImageMatchingMethod_stringToEnum(const std::string& m)
return EImageMatchingMethod::FRUSTUM;
if (mode == "frustumorvocabularytree")
return EImageMatchingMethod::FRUSTUM_OR_VOCABULARYTREE;
if (mode == "mirror")
return EImageMatchingMethod::MIRROR;

throw std::out_of_range("Invalid EImageMatchingMethod: " + m);
}
@@ -185,6 +189,14 @@ void generateSequentialMatches(const sfmData::SfMData& sfmData, size_t nbMatches
}
}

void generateMirrorsMatches(const sfmData::SfMData& sfmData, OrderedPairList& outPairList)
{
for (const auto& [index, _] : sfmData.getViews())
{
outPairList[index].insert(index);
}
}

void generateAllMatchesInOneMap(const std::set<IndexT>& viewIds, OrderedPairList& outPairList)
{
for (const IndexT imgA : viewIds)
4 changes: 3 additions & 1 deletion src/aliceVision/imageMatching/ImageMatching.hpp
@@ -54,7 +54,8 @@ enum class EImageMatchingMethod
SEQUENTIAL = 2,
SEQUENTIAL_AND_VOCABULARYTREE = 3,
FRUSTUM = 4,
FRUSTUM_OR_VOCABULARYTREE = 5
FRUSTUM_OR_VOCABULARYTREE = 5,
MIRROR = 6,
};

/**
@@ -116,6 +117,7 @@ EImageMatchingMode EImageMatchingMode_stringToEnum(const std::string& modeMultiS
void convertAllMatchesToPairList(const PairList& allMatches, std::size_t numMatches, OrderedPairList& outPairList);

void generateSequentialMatches(const sfmData::SfMData& sfmData, size_t nbMatches, OrderedPairList& outPairList);
void generateMirrorsMatches(const sfmData::SfMData& sfmData, OrderedPairList& outPairList);
void generateAllMatchesInOneMap(const std::set<IndexT>& viewIds, OrderedPairList& outPairList);
void generateAllMatchesBetweenTwoMap(const std::set<IndexT>& viewIdsA, const std::set<IndexT>& viewIdsB, OrderedPairList& outPairList);

3 changes: 3 additions & 0 deletions src/aliceVision/matchingImageCollection/CMakeLists.txt
@@ -14,6 +14,7 @@ set(matching_collection_images_files_headers
ImagePairListIO.hpp
geometricFilterUtils.hpp
pairBuilder.hpp
GeometricInfo.hpp
)

# Sources
@@ -26,6 +27,7 @@ set(matching_collection_images_files_sources
geometricFilterUtils.cpp
ImagePairListIO.cpp
pairBuilder.cpp
GeometricInfo.cpp
)

alicevision_add_library(aliceVision_matchingImageCollection
@@ -38,6 +40,7 @@ alicevision_add_library(aliceVision_matchingImageCollection
aliceVision_sfmData
Boost::boost
Boost::timer
Boost::json
PRIVATE_LINKS
aliceVision_system
${CERES_LIBRARIES}
64 changes: 61 additions & 3 deletions src/aliceVision/matchingImageCollection/GeometricFilter.hpp
@@ -13,6 +13,7 @@
#include <aliceVision/matching/MatchesCollections.hpp>
#include <aliceVision/matchingImageCollection/GeometricFilterMatrix.hpp>
#include <aliceVision/system/ProgressDisplay.hpp>
#include <aliceVision/matchingImageCollection/GeometricInfo.hpp>

#include <map>
#include <random>
@@ -28,7 +29,7 @@ using namespace aliceVision::matching;
* or all the pairs and regions correspondences contained in the putativeMatches set.
* Allow to keep only geometrically coherent matches.
* It discards pairs that do not lead to a valid robust model estimation.
* @param[out] geometricMatches
* @param[out] out_geometricMatches
* @param[in] sfmData
* @param[in] regionsPerView
* @param[in] functor
@@ -39,7 +40,7 @@ */
*/
template<typename GeometryFunctor>
void robustModelEstimation(PairwiseMatches& out_geometricMatches,
const sfmData::SfMData* sfmData,
const sfmData::SfMData& sfmData,
const feature::RegionsPerView& regionsPerView,
const GeometryFunctor& functor,
const PairwiseMatches& putativeMatches,
@@ -73,7 +74,6 @@ void robustModelEstimation(PairwiseMatches& out_geometricMatches,
{
MatchesPerDescType guidedGeometricInliers;
geometricFilter.Geometry_guided_matching(sfmData, regionsPerView, imagePair, distanceRatio, guidedGeometricInliers);
// ALICEVISION_LOG_DEBUG("#before/#after: " << putative_inliers.size() << "/" << guided_geometric_inliers.size());
std::swap(inliers, guidedGeometricInliers);
}

@@ -87,6 +87,64 @@
}
}

/**
* @brief Perform robust model estimation (with optional guided_matching)
* for all the pairs and region correspondences contained in the putativeMatches set.
* Allows keeping only geometrically coherent matches.
* It discards pairs that do not lead to a valid robust model estimation.
* @param[out] out_geometricInfos
* @param[in] sfmData
* @param[in] regionsPerView
* @param[in] functor
* @param[in] putativeMatches
* @param[in] randomNumberGenerator
*/
template<typename GeometryFunctor>
void robustModelEstimation(PairwiseGeometricInfo& out_geometricInfos,
const sfmData::SfMData& sfmData,
const feature::RegionsPerView& regionsPerView,
const GeometryFunctor& functor,
const PairwiseMatches& putativeMatches,
std::mt19937& randomNumberGenerator)
{
out_geometricInfos.clear();

auto progressDisplay = system::createConsoleProgressDisplay(putativeMatches.size(), std::cout, "Robust Model Estimation\n");

#pragma omp parallel for schedule(dynamic)
for (int i = 0; i < (int)putativeMatches.size(); ++i)
{
PairwiseMatches::const_iterator iter = putativeMatches.begin();
std::advance(iter, i);

const Pair currentPair = iter->first;
const MatchesPerDescType& putativeMatchesPerType = iter->second;
const Pair& imagePair = iter->first;

// apply the geometric filter (robust model estimation)
{
MatchesPerDescType inliers;
GeometryFunctor geometricFilter = functor; // use a copy since we are in a multi-thread context
const EstimationStatus state =
geometricFilter.geometricEstimation(sfmData, regionsPerView, imagePair, putativeMatchesPerType, randomNumberGenerator, inliers);

if (state.hasStrongSupport)
{
#pragma omp critical
{
PairGeometricInfo info;
info.model = geometricFilter.getMatrix();
info.inliers = inliers.getNbAllMatches();
info.threshold = geometricFilter.m_dPrecision_robust;
info.type = geometricFilter.getType();
out_geometricInfos.emplace(currentPair, info);
}
}
}
++progressDisplay;
}
}

/**
* @brief removePoorlyOverlappingImagePairs Removes image pairs from the given list of geometric
* matches that have poor overlap according to the supplied criteria.
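
GeometricInfo.hpp is added to the build above, but its contents are not part of this diff. Inferring only from the fields used in the new robustModelEstimation overload (model, inliers, threshold, type) and the Pair-keyed container, a plausible shape of these types is sketched below; the actual aliceVision definitions (matrix type, enum values, IndexT) may differ.

```cpp
#include <array>
#include <cstddef>
#include <cstdint>
#include <map>
#include <utility>

using IndexT = std::uint32_t;            // aliceVision view identifier type (assumed)
using Pair = std::pair<IndexT, IndexT>;  // an (viewA, viewB) image pair

// Which geometric model was estimated for the pair (names assumed).
enum class EGeometricModel { FUNDAMENTAL_MATRIX, ESSENTIAL_MATRIX, HOMOGRAPHY_MATRIX };

struct PairGeometricInfo
{
    std::array<std::array<double, 3>, 3> model{};  // estimated F/E/H matrix (Eigen type in aliceVision)
    std::size_t inliers = 0;                       // number of matches supporting the model
    double threshold = 0.0;                        // robust precision reached during estimation (pixels)
    EGeometricModel type = EGeometricModel::FUNDAMENTAL_MATRIX;
};

// One entry per image pair whose estimated model had strong support.
using PairwiseGeometricInfo = std::map<Pair, PairGeometricInfo>;
```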