diff --git a/CMakeLists.txt b/CMakeLists.txt index 62b55d3ae..f03841752 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -309,13 +309,15 @@ if(BUILD_SHARED_LIBS AND KAHYPAR_STATIC_LINK_DEPENDENCIES) endif() block() + # define the required Boost libraries + set(BOOST_INCLUDE_LIBRARIES program_options range dynamic_bitset lexical_cast) + if(KAHYPAR_STATIC_LINK_DEPENDENCIES) set(Boost_USE_STATIC_LIBS ON) set(BUILD_SHARED_LIBS OFF) endif() if(KAHYPAR_DOWNLOAD_BOOST) # Download Boost - set(BOOST_INCLUDE_LIBRARIES program_options range dynamic_bitset) set(BOOST_ENABLE_CMAKE ON) FetchContent_Declare( Boost EXCLUDE_FROM_ALL SYSTEM @@ -332,16 +334,14 @@ block() endif() message(STATUS "Boost Include: ${Boost_INCLUDE_DIRS}, Boost Library: ${Boost_LIBRARY_DIRS}") endif() -endblock() -# the downloaded (newer) boost requires to add these targets explicitly -if(TARGET Boost::range) - target_link_libraries(MtKaHyPar-Include INTERFACE Boost::range) -endif() -if(TARGET Boost::dynamic_bitset) - target_link_libraries(MtKaHyPar-Include INTERFACE Boost::dynamic_bitset) -endif() -target_link_libraries(MtKaHyPar-Include INTERFACE Boost::program_options) + # newer Boost versions require adding all targets explicitly + foreach(BOOST_TARGET ${BOOST_INCLUDE_LIBRARIES}) + if(TARGET Boost::${BOOST_TARGET}) + target_link_libraries(MtKaHyPar-Include INTERFACE Boost::${BOOST_TARGET}) + endif() + endforeach() +endblock() if(KAHYPAR_DOWNLOAD_TBB) diff --git a/README.md b/README.md index 20921ffd4..b33965e4b 100644 --- a/README.md +++ b/README.md @@ -249,11 +249,12 @@ When including Mt-KaHyPar directly, it is also possible to control static versus ### The C Library Interface -The library interface can be found in `include/mtkahypar.h` with a detailed documentation. We also provide several examples in the folder `lib/examples` that show how to use the library. +The library interface can be found in [`include/mtkahypar.h`](include/mtkahypar.h) with detailed documentation. We also provide [several examples](lib/examples) that show how to use the library. 
Here is a short example of how you can partition a hypergraph using our library interface: ```cpp +#include <cassert> #include <memory> #include <vector> #include <iostream> @@ -262,15 +263,15 @@ Here is a short example of how you can partition a hypergraph using our library #include <mtkahypar.h> int main(int argc, char* argv[]) { + mt_kahypar_error_t error{}; - // Initialize thread pool + // Initialize mt_kahypar_initialize( std::thread::hardware_concurrency() /* use all available cores */, true /* activate interleaved NUMA allocation policy */ ); // Setup partitioning context - mt_kahypar_context_t* context = mt_kahypar_context_new(); - mt_kahypar_load_preset(context, DEFAULT /* corresponds to MT-KaHyPar-D */); + mt_kahypar_context_t* context = mt_kahypar_context_from_preset(DEFAULT); // In the following, we partition a hypergraph into two blocks // with an allowed imbalance of 3% and optimize the connectivity metric (KM1) mt_kahypar_set_partitioning_parameters(context, @@ -278,30 +279,37 @@ int main(int argc, char* argv[]) { KM1 /* objective function */); mt_kahypar_set_seed(42 /* seed */); // Enable logging - mt_kahypar_set_context_parameter(context, VERBOSE, "1"); + mt_kahypar_status_t status = + mt_kahypar_set_context_parameter(context, VERBOSE, "1", &error); + assert(status == SUCCESS); // Load Hypergraph for DEFAULT preset mt_kahypar_hypergraph_t hypergraph = - mt_kahypar_read_hypergraph_from_file( - "path/to/hypergraph/file", DEFAULT, HMETIS /* file format */); + mt_kahypar_read_hypergraph_from_file("path/to/hypergraph/file", + context, HMETIS /* file format */, &error); + if (hypergraph.hypergraph == nullptr) { + std::cout << error.msg << std::endl; std::exit(1); + } // Partition Hypergraph mt_kahypar_partitioned_hypergraph_t partitioned_hg = - mt_kahypar_partition(hypergraph, context); + mt_kahypar_partition(hypergraph, context, &error); + if (partitioned_hg.partitioned_hg == nullptr) { + std::cout << error.msg << std::endl; std::exit(1); + } // Extract Partition - std::unique_ptr<mt_kahypar_partition_id_t[]> partition = - std::make_unique<mt_kahypar_partition_id_t[]>(mt_kahypar_num_hypernodes(hypergraph)); + auto partition = std::make_unique<mt_kahypar_partition_id_t[]>( + mt_kahypar_num_hypernodes(hypergraph)); mt_kahypar_get_partition(partitioned_hg, partition.get()); // Extract Block Weights - std::unique_ptr<mt_kahypar_hypernode_weight_t[]> block_weights = - std::make_unique<mt_kahypar_hypernode_weight_t[]>(2); + auto block_weights = std::make_unique<mt_kahypar_hypernode_weight_t[]>(2); mt_kahypar_get_block_weights(partitioned_hg, block_weights.get()); // Compute Metrics const double imbalance = mt_kahypar_imbalance(partitioned_hg, context); - const double km1 = mt_kahypar_km1(partitioned_hg); + const int km1 = mt_kahypar_km1(partitioned_hg); // Output Results std::cout << "Partitioning Results:" << std::endl; @@ -333,12 +341,11 @@ The `mt_kahypar_hypergraph_t` structure stores a pointer to this data structure Therefore, you can not partition a (hyper)graph with all available configurations once it is loaded or constructed. 
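All objects created through the C interface are owned by the caller and should be released once they are no longer needed. A minimal cleanup sketch for the example above (`mt_kahypar_free_hypergraph` and `mt_kahypar_free_context` are declared in `include/mtkahypar.h`; a matching `mt_kahypar_free_partitioned_hypergraph` for partition handles is assumed here): ```cpp /* Release the partition, the hypergraph, and the context once done. mt_kahypar_free_partitioned_hypergraph is an assumed name that follows the naming scheme of the other free functions in mtkahypar.h. */ mt_kahypar_free_partitioned_hypergraph(partitioned_hg); mt_kahypar_free_hypergraph(hypergraph); mt_kahypar_free_context(context); ``` Freeing a handle does not lift the preset restriction: a loaded (hyper)graph stays tied to the data structure of the preset it was created with.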
However, you can check the compatibility of a hypergraph with a configuration with the following code: ```cpp -mt_kahypar_context_t context = mt_kahypar_context_new(); -mt_kahypar_load_preset(context, QUALITY); +mt_kahypar_context_t* context = mt_kahypar_context_from_preset(QUALITY); // Check if the hypergraph is compatible with the QUALITY preset if ( mt_kahypar_check_compatibility(hypergraph, QUALITY) ) { mt_kahypar_partitioned_hypergraph_t partitioned_hg = - mt_kahypar_partition(hypergraph, context); + mt_kahypar_partition(hypergraph, context, &error); } ``` @@ -353,8 +360,8 @@ make mtkahypar_python This will create a shared library in the `build/python` folder (`mtkahypar.so` on Linux and `mtkahypar.pyd` on Windows). Copy the library to your Python project directory to import Mt-KaHyPar as a Python module. -A documentation of the Python module can be found in `python/module.cpp`, or by importing the module (`import mtkahypar`) and calling `help(mtkahypar)` in Python. -We also provide several examples that show how to use the Python interface in the folder `python/examples`. +Documentation of the Python module can be found by importing the module (`import mtkahypar`) and calling `help(mtkahypar)` in Python. +We also provide [several examples](python/examples) that show how to use the Python interface. Here is a short example of how you can partition a hypergraph using our Python interface: @@ -362,65 +369,53 @@ Here is a short example of how you can partition a hypergraph using our Python i import multiprocessing import mtkahypar -# Initialize thread pool -mtkahypar.initialize(multiprocessing.cpu_count()) # use all available cores +# Initialize +mtk = mtkahypar.initialize(multiprocessing.cpu_count()) # use all available cores # Setup partitioning context -context = mtkahypar.Context() -context.loadPreset(mtkahypar.PresetType.DEFAULT) # corresponds to Mt-KaHyPar-D +context = mtk.context_from_preset(mtkahypar.PresetType.DEFAULT) # In the following, we partition a hypergraph into two blocks # with an allowed imbalance of 3% and optimize the connectivity metric -context.setPartitioningParameters( +context.set_partitioning_parameters( 2, # number of blocks 0.03, # imbalance parameter mtkahypar.Objective.KM1) # objective function -mtkahypar.setSeed(42) # seed -context.logging = True # enables partitioning output +mtkahypar.set_seed(42) # seed +context.logging = True -# Load hypergraph from file -hypergraph = mtkahypar.Hypergraph( - "path/to/hypergraph/file", # hypergraph file - mtkahypar.FileFormat.HMETIS) # hypergraph is stored in hMetis file format +# Load hypergraph from file (assumes hMetis file format by default) +hypergraph = mtk.hypergraph_from_file("path/to/hypergraph/file", context) # Partition hypergraph partitioned_hg = hypergraph.partition(context) # Output metrics print("Partition Stats:") -print("Imbalance = " + str(partitioned_hg.imbalance())) +print("Imbalance = " + str(partitioned_hg.imbalance(context))) print("km1 = " + str(partitioned_hg.km1())) print("Block Weights:") -print("Weight of Block 0 = " + str(partitioned_hg.blockWeight(0))) -print("Weight of Block 1 = " + str(partitioned_hg.blockWeight(1))) +for i in partitioned_hg.blocks(): + print(f"Weight of Block {i} = {partitioned_hg.block_weight(i)}") ``` We also provide an optimized graph data structure for partitioning plain graphs. 
The following example loads and partitions a graph: ```py -# Load graph from file -graph = mtkahypar.Graph( - "path/to/graph/file", # graph file - mtkahypar.FileFormat.METIS) # graph is stored in Metis file format +# Load graph from file (assumes Metis file format by default) +graph = mtk.graph_from_file("path/to/graph/file", context) # Partition graph partitioned_graph = graph.partition(context) ``` -**Note** that for partitioning hypergraphs into a large number of blocks (e.g., k > 1024), we recommend using the `LARGE_K` configuration and the `partitionIntoLargeK(...)` function. -Using a different configuration for large k partitioning may cause excessive memory usage and high running times, depending on the size of the hypergraph and the memory capacity of your target machine. -For partitioning plain graphs, you can load the `LARGE_K` configuration, but you can still use the `partition(...)` function of the graph object. -Here is an example that partitions a hypergraph into 1024 blocks: -```py -# Setup partitioning context -context = mtkahypar.Context() -context.loadPreset(mtkahypar.PresetType.LARGE_K) -# In the following, we partition a hypergraph into 1024 blocks -# with an allowed imbalance of 3% and optimize the connectivity metric -context.setPartitioningParameters(1024, 0.03, mtkahypar.Objective.KM1, 42) +**Note** that we internally use different data structures to represent a (hyper)graph based on the corresponding configuration (`PresetType`). +Therefore, you can not partition a (hyper)graph with all available configurations once it is loaded or constructed. However, you can check the compatibility of a hypergraph with a configuration with the following code: -# Load and partition hypergraph -hypergraph = mtkahypar.Hypergraph("path/to/hypergraph/file", mtkahypar.FileFormat.HMETIS) -partitioned_hg = hypergraph.partitionIntoLargeK(context) +```py +context = mtk.context_from_preset(mtkahypar.PresetType.QUALITY) +# Check if the hypergraph is compatible with the QUALITY preset +if hypergraph.is_compatible(context.preset): + partitioned_hg = hypergraph.partition(context) ``` Supported Objective Functions diff --git a/include/helper_functions.h b/include/helper_functions.h deleted file mode 100644 index 9f803f81e..000000000 --- a/include/helper_functions.h +++ /dev/null @@ -1,234 +0,0 @@ -/******************************************************************************* - * MIT License - * - * This file is part of Mt-KaHyPar. - * - * Copyright (C) 2023 Tobias Heuer - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - ******************************************************************************/ - -#pragma once - -#include -#include - -#include "mtkahypartypes.h" - -#include "mt-kahypar/partition/context.h" - -using namespace mt_kahypar; - -namespace lib { -bool check_compatibility(mt_kahypar_hypergraph_t hypergraph, - mt_kahypar_preset_type_t preset) { - switch ( preset ) { - case DEFAULT: - case QUALITY: - case DETERMINISTIC: - case LARGE_K: - return hypergraph.type == STATIC_GRAPH || hypergraph.type == STATIC_HYPERGRAPH; - case HIGHEST_QUALITY: - return hypergraph.type == DYNAMIC_GRAPH || hypergraph.type == DYNAMIC_HYPERGRAPH; - } - return false; -} - -bool check_compatibility(mt_kahypar_partitioned_hypergraph_t partitioned_hg, - mt_kahypar_preset_type_t preset) { - switch ( preset ) { - case DEFAULT: - case QUALITY: - case DETERMINISTIC: - return partitioned_hg.type == MULTILEVEL_GRAPH_PARTITIONING || - partitioned_hg.type == MULTILEVEL_HYPERGRAPH_PARTITIONING; - case LARGE_K: - return partitioned_hg.type == MULTILEVEL_GRAPH_PARTITIONING || - partitioned_hg.type == LARGE_K_PARTITIONING; - case HIGHEST_QUALITY: - return partitioned_hg.type == N_LEVEL_GRAPH_PARTITIONING || - partitioned_hg.type == N_LEVEL_HYPERGRAPH_PARTITIONING; - } - return false; -} - - -bool check_if_all_relavant_parameters_are_set(Context& context) { - bool success = true; - if ( context.partition.preset_type == PresetType::UNDEFINED ) { - WARNING("Preset type not specified. Either use mt_kahypar_load_preset(...) or specify" - << "parameter 'preset-type' in your configuration file!"); - success = false; - } - if ( context.partition.k == std::numeric_limits::max() ) { - WARNING("Number of blocks not specified."); - success = false; - } - if ( context.partition.epsilon == std::numeric_limits::max() ) { - WARNING("Imbalance not specified."); - success = false; - } - if ( context.partition.objective == Objective::UNDEFINED ) { - WARNING("Objective function not specified."); - success = false; - } - return success; -} - -void prepare_context(Context& context) { - context.shared_memory.original_num_threads = mt_kahypar::TBBInitializer::instance().total_number_of_threads(); - context.shared_memory.num_threads = mt_kahypar::TBBInitializer::instance().total_number_of_threads(); - context.utility_id = mt_kahypar::utils::Utilities::instance().registerNewUtilityObjects(); - - context.partition.perfect_balance_part_weights.clear(); - if ( !context.partition.use_individual_part_weights ) { - context.partition.max_part_weights.clear(); - } -} - -InstanceType get_instance_type(mt_kahypar_hypergraph_t hypergraph) { - switch ( hypergraph.type ) { - case STATIC_GRAPH: - case DYNAMIC_GRAPH: - return InstanceType::graph; - case STATIC_HYPERGRAPH: - case DYNAMIC_HYPERGRAPH: - return InstanceType::hypergraph; - case NULLPTR_HYPERGRAPH: - return InstanceType::UNDEFINED; - } - return InstanceType::UNDEFINED; -} - -InstanceType get_instance_type(mt_kahypar_partitioned_hypergraph_t partitioned_hg) { - switch ( partitioned_hg.type ) { - case MULTILEVEL_GRAPH_PARTITIONING: - case N_LEVEL_GRAPH_PARTITIONING: - return InstanceType::graph; - case MULTILEVEL_HYPERGRAPH_PARTITIONING: - case N_LEVEL_HYPERGRAPH_PARTITIONING: - case LARGE_K_PARTITIONING: - return InstanceType::hypergraph; - 
case NULLPTR_PARTITION: - return InstanceType::UNDEFINED; - } - return InstanceType::UNDEFINED; -} - -mt_kahypar_preset_type_t get_preset_c_type(const PresetType preset) { - switch ( preset ) { - case PresetType::default_preset: return DEFAULT; - case PresetType::quality: return QUALITY; - case PresetType::highest_quality: return HIGHEST_QUALITY; - case PresetType::deterministic: return DETERMINISTIC; - case PresetType::large_k: return LARGE_K; - case PresetType::UNDEFINED: return DEFAULT; - } - return DEFAULT; -} - -std::string incompatibility_description(mt_kahypar_hypergraph_t hypergraph) { - std::stringstream ss; - switch ( hypergraph.type ) { - case STATIC_GRAPH: - ss << "The hypergraph uses the static graph data structure which can be only used " - << "in combination with the following presets: " - << "DEFAULT, QUALITY, DETERMINISTIC and LARGE_K"; break; - case DYNAMIC_GRAPH: - ss << "The hypergraph uses the dynamic graph data structure which can be only used " - << "in combination with the following preset: " - << "HIGHEST_QUALITY"; break; - case STATIC_HYPERGRAPH: - ss << "The hypergraph uses the static hypergraph data structure which can be only used " - << "in combination with the following presets: " - << "DEFAULT, QUALITY, DETERMINISTIC and LARGE_K"; break; - case DYNAMIC_HYPERGRAPH: - ss << "The hypergraph uses the dynamic hypergraph data structure which can be only used " - << "in combination with the following preset: " - << "HIGHEST_QUALITY"; break; - case NULLPTR_HYPERGRAPH: - ss << "The hypergraph holds a nullptr. " - << "Did you forgot to construct or load a hypergraph?"; break; - } - return ss.str(); -} - -std::string incompatibility_description(mt_kahypar_partitioned_hypergraph_t partitioned_hg) { - std::stringstream ss; - switch ( partitioned_hg.type ) { - case MULTILEVEL_GRAPH_PARTITIONING: - ss << "The partitioned hypergraph uses the data structures for multilevel graph partitioning " - << "which can be only used in combination with the following presets: " - << "DEFAULT, QUALITY, DETERMINISTIC, and LARGE_K"; break; - case N_LEVEL_GRAPH_PARTITIONING: - ss << "The partitioned hypergraph uses the data structures for n-level graph partitioning " - << "which can be only used in combination with the following preset: " - << "HIGHEST_QUALITY"; break; - case MULTILEVEL_HYPERGRAPH_PARTITIONING: - ss << "The partitioned hypergraph uses the data structures for multilevel hypergraph partitioning " - << "which can be only used in combination with the following presets: " - << "DEFAULT, QUALITY, and DETERMINISTIC"; break; - case N_LEVEL_HYPERGRAPH_PARTITIONING: - ss << "The partitioned hypergraph uses the data structures for n-level hypergraph partitioning " - << "which can be only used in combination with the following preset: " - << "HIGHEST_QUALITY"; break; - case LARGE_K_PARTITIONING: - ss << "The partitioned hypergraph uses the data structures for large k hypergraph partitioning " - << "which can be only used in combination with the following preset: " - << "LARGE_K"; break; - case NULLPTR_PARTITION: - ss << "The hypergraph holds a nullptr. 
" - << "Did you forgot to construct or load a hypergraph?"; break; - } - return ss.str(); -} - -template -mt_kahypar_partitioned_hypergraph_t create_partitoned_hypergraph(Hypergraph& hg, - const mt_kahypar_partition_id_t num_blocks, - const mt_kahypar_partition_id_t* partition) { - PartitionedHypergraph partitioned_hg(num_blocks, hg, parallel_tag_t { }); - const mt_kahypar::HypernodeID num_nodes = hg.initialNumNodes(); - tbb::parallel_for(ID(0), num_nodes, [&](const mt_kahypar::HypernodeID& hn) { - partitioned_hg.setOnlyNodePart(hn, partition[hn]); - }); - partitioned_hg.initializePartition(); - return mt_kahypar_partitioned_hypergraph_t { reinterpret_cast( - new PartitionedHypergraph(std::move(partitioned_hg))), PartitionedHypergraph::TYPE }; -} - -template -void get_partition(const PartitionedHypergraph& partitioned_hg, - mt_kahypar_partition_id_t* partition) { - ASSERT(partition != nullptr); - partitioned_hg.doParallelForAllNodes([&](const HypernodeID& hn) { - partition[hn] = partitioned_hg.partID(hn); - }); -} - -template -void get_block_weights(const PartitionedHypergraph& partitioned_hg, - mt_kahypar_hypernode_weight_t* block_weights) { - ASSERT(block_weights != nullptr); - for ( PartitionID i = 0; i < partitioned_hg.k(); ++i ) { - block_weights[i] = partitioned_hg.partWeight(i); - } -} - -} // namespace lib \ No newline at end of file diff --git a/include/lib_generic_impls.h b/include/lib_generic_impls.h new file mode 100644 index 000000000..80708b324 --- /dev/null +++ b/include/lib_generic_impls.h @@ -0,0 +1,378 @@ +/******************************************************************************* + * MIT License + * + * This file is part of Mt-KaHyPar. + * + * Copyright (C) 2024 Nikolai Maas + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ ******************************************************************************/ + +#pragma once + +#include <sstream> +#include <string> +#include + +#include "mtkahypartypes.h" + +#include "mt-kahypar/definitions.h" +#include "mt-kahypar/partition/context.h" +#include "mt-kahypar/partition/conversion.h" +#include "mt-kahypar/partition/metrics.h" +#include "mt-kahypar/io/hypergraph_io.h" +#include "mt-kahypar/utils/cast.h" +#include "mt-kahypar/utils/exception.h" + + +using namespace mt_kahypar; + +namespace lib { + using StaticGraph = typename StaticGraphTypeTraits::Hypergraph; + using DynamicGraph = typename DynamicGraphTypeTraits::Hypergraph; + using StaticHypergraph = typename StaticHypergraphTypeTraits::Hypergraph; + using DynamicHypergraph = typename DynamicHypergraphTypeTraits::Hypergraph; + + using StaticPartitionedGraph = typename StaticGraphTypeTraits::PartitionedHypergraph; + using DynamicPartitionedGraph = typename DynamicGraphTypeTraits::PartitionedHypergraph; + using StaticPartitionedHypergraph = typename StaticHypergraphTypeTraits::PartitionedHypergraph; + using DynamicPartitionedHypergraph = typename DynamicHypergraphTypeTraits::PartitionedHypergraph; + using SparsePartitionedHypergraph = typename LargeKHypergraphTypeTraits::PartitionedHypergraph; + + +// ####################### Generic Handling of Different Graph Types ####################### + +template<typename ReturnT, bool Throwing, typename Func> +ReturnT switch_hg(mt_kahypar_hypergraph_t hg, Func f) { + switch ( hg.type ) { + case STATIC_GRAPH: + return f(utils::cast<StaticGraph>(hg)); + case DYNAMIC_GRAPH: + return f(utils::cast<DynamicGraph>(hg)); + case STATIC_HYPERGRAPH: + return f(utils::cast<StaticHypergraph>(hg)); + case DYNAMIC_HYPERGRAPH: + return f(utils::cast<DynamicHypergraph>(hg)); + case NULLPTR_HYPERGRAPH: break; + } + if constexpr (Throwing) { + throw UnsupportedOperationException("Input is not a valid hypergraph."); + } + return ReturnT{}; +} + +template<typename ReturnT, bool Throwing, typename Func> +ReturnT switch_graph(mt_kahypar_hypergraph_t hg, Func f) { + switch ( hg.type ) { + case STATIC_GRAPH: + return f(utils::cast<StaticGraph>(hg)); + case DYNAMIC_GRAPH: + return f(utils::cast<DynamicGraph>(hg)); + case STATIC_HYPERGRAPH: + case DYNAMIC_HYPERGRAPH: + case NULLPTR_HYPERGRAPH: + break; + } + if constexpr (Throwing) { + throw UnsupportedOperationException("Input is not a graph."); + } + return ReturnT{}; +} + +template<typename ReturnT, bool Throwing, typename Func> +ReturnT switch_phg(mt_kahypar_partitioned_hypergraph_t phg, Func f) { + switch ( phg.type ) { + case MULTILEVEL_GRAPH_PARTITIONING: + return f(utils::cast<StaticPartitionedGraph>(phg)); + case N_LEVEL_GRAPH_PARTITIONING: + return f(utils::cast<DynamicPartitionedGraph>(phg)); + case MULTILEVEL_HYPERGRAPH_PARTITIONING: + return f(utils::cast<StaticPartitionedHypergraph>(phg)); + case N_LEVEL_HYPERGRAPH_PARTITIONING: + return f(utils::cast<DynamicPartitionedHypergraph>(phg)); + case LARGE_K_PARTITIONING: + return f(utils::cast<SparsePartitionedHypergraph>(phg)); + case NULLPTR_PARTITION: + break; + } + if constexpr (Throwing) { + throw UnsupportedOperationException("Input is not a valid partitioned hypergraph."); + } + return ReturnT{}; +} + +template<typename HypergraphT> +void check_hypernode_is_valid(const HypergraphT& hg, HypernodeID hn) { + if (hn >= hg.initialNumNodes()) { + std::stringstream ss; + ss << "Invalid hypernode: ID is \"" << hn << "\", but there are only " + << hg.initialNumNodes() << " nodes"; + throw InvalidInputException(ss.str()); + } +} + +template<typename HypergraphT> +void check_hyperedge_is_valid(const HypergraphT& hg, HyperedgeID he) { + if (he >= hg.initialNumEdges()) { + std::stringstream ss; + ss << "Invalid hyperedge: ID is \"" << he << "\", but there are only " + << hg.initialNumEdges() << " edges"; + throw InvalidInputException(ss.str()); + } +} + +template<typename PartitionedHypergraphT> +void check_block_is_valid(const PartitionedHypergraphT& phg, 
PartitionID block) { + if (block >= phg.k()) { + std::stringstream ss; + ss << "Invalid block: ID is \"" << block << "\", but the partition has only " + << phg.k() << " blocks"; + throw InvalidInputException(ss.str()); + } +} + + +// ####################### Hypergraph Generic Implementations ####################### + +template<bool Throwing> +HypernodeID num_nodes(mt_kahypar_hypergraph_t hypergraph) { + return switch_hg<HypernodeID, Throwing>(hypergraph, [](const auto& hg) { + return hg.initialNumNodes(); + }); +} + +template<bool Throwing> +HyperedgeID num_edges(mt_kahypar_hypergraph_t hypergraph) { + return switch_hg<HyperedgeID, Throwing>(hypergraph, [](const auto& hg) { + return hg.initialNumEdges(); + }); +} + +template<bool Throwing> +HypernodeID num_pins(mt_kahypar_hypergraph_t hypergraph) { + return switch_hg<HypernodeID, Throwing>(hypergraph, [](const auto& hg) { + return hg.initialNumPins(); + }); +} + +template<bool Throwing> +HypernodeWeight total_weight(mt_kahypar_hypergraph_t hypergraph) { + return switch_hg<HypernodeWeight, Throwing>(hypergraph, [](const auto& hg) { + return hg.totalWeight(); + }); +} + +template<bool Throwing> +HyperedgeID node_degree(mt_kahypar_hypergraph_t hypergraph, HypernodeID hn) { + return switch_hg<HyperedgeID, Throwing>(hypergraph, [=](const auto& hg) { + if constexpr (Throwing) { + check_hypernode_is_valid(hg, hn); + } + return hg.nodeDegree(hn); + }); +} + +template<bool Throwing> +HypernodeWeight node_weight(mt_kahypar_hypergraph_t hypergraph, HypernodeID hn) { + return switch_hg<HypernodeWeight, Throwing>(hypergraph, [=](const auto& hg) { + if constexpr (Throwing) { + check_hypernode_is_valid(hg, hn); + } + return hg.nodeWeight(hn); + }); + } + +template<bool Throwing> +bool is_fixed(mt_kahypar_hypergraph_t hypergraph, HypernodeID hn) { + return switch_hg<bool, Throwing>(hypergraph, [=](const auto& hg) { + if constexpr (Throwing) { + check_hypernode_is_valid(hg, hn); + } + return hg.isFixed(hn); + }); +} + +template<bool Throwing> +PartitionID fixed_vertex_block(mt_kahypar_hypergraph_t hypergraph, HypernodeID hn) { + return switch_hg<PartitionID, Throwing>(hypergraph, [=](const auto& hg) { + if constexpr (Throwing) { + check_hypernode_is_valid(hg, hn); + } + return hg.isFixed(hn) ? 
hg.fixedVertexBlock(hn) : kInvalidPartition; + }); +} + +template<bool Throwing> +HypernodeID edge_size(mt_kahypar_hypergraph_t hypergraph, HyperedgeID he) { + return switch_hg<HypernodeID, Throwing>(hypergraph, [=](const auto& hg) { + if constexpr (Throwing) { + check_hyperedge_is_valid(hg, he); + } + return hg.edgeSize(he); + }); +} + +template<bool Throwing> +HyperedgeWeight edge_weight(mt_kahypar_hypergraph_t hypergraph, HyperedgeID he) { + return switch_hg<HyperedgeWeight, Throwing>(hypergraph, [=](const auto& hg) { + if constexpr (Throwing) { + check_hyperedge_is_valid(hg, he); + } + return hg.edgeWeight(he); + }); +} + + +// ####################### Partitioned Hypergraph Generic Implementations ####################### + +template<bool Throwing> +void write_partition_to_file(mt_kahypar_partitioned_hypergraph_t p, const std::string& partition_file) { + switch_phg<int, Throwing>(p, [&](auto& phg) { + io::writePartitionFile(phg, partition_file); + return 0; + }); +} + +template<bool Throwing> +void get_partition(mt_kahypar_partitioned_hypergraph_t p, mt_kahypar_partition_id_t* partition) { + ASSERT(partition != nullptr); + switch_phg<int, Throwing>(p, [&](const auto& phg) { + phg.doParallelForAllNodes([&](const HypernodeID& hn) { + partition[hn] = phg.partID(hn); + }); + return 0; + }); +} + +template<bool Throwing> +void get_block_weights(mt_kahypar_partitioned_hypergraph_t p, mt_kahypar_hypernode_weight_t* block_weights) { + ASSERT(block_weights != nullptr); + switch_phg<int, Throwing>(p, [&](const auto& phg) { + for ( PartitionID i = 0; i < phg.k(); ++i ) { + block_weights[i] = phg.partWeight(i); + } + return 0; + }); +} + +template<bool Throwing> +PartitionID num_blocks(mt_kahypar_partitioned_hypergraph_t p) { + return switch_phg<PartitionID, Throwing>(p, [](const auto& phg) { + return phg.k(); + }); +} + +template<bool Throwing> +HypernodeWeight block_weight(mt_kahypar_partitioned_hypergraph_t p, PartitionID block) { + return switch_phg<HypernodeWeight, Throwing>(p, [=](const auto& phg) { + if constexpr (Throwing) { + check_block_is_valid(phg, block); + } + return phg.partWeight(block); + }); +} + +template<bool Throwing> +PartitionID block_id(mt_kahypar_partitioned_hypergraph_t p, HypernodeID hn) { + return switch_phg<PartitionID, Throwing>(p, [=](const auto& phg) { + if constexpr (Throwing) { + check_hypernode_is_valid(phg, hn); + } + return phg.partID(hn); + }); +} + +template<bool Throwing> +bool is_incident_to_cut_edge(mt_kahypar_partitioned_hypergraph_t p, HypernodeID hn) { + return switch_phg<bool, Throwing>(p, [=](const auto& phg) { + if constexpr (Throwing) { + check_hypernode_is_valid(phg, hn); + } + return phg.isBorderNode(hn); + }); +} + +template<bool Throwing> +HyperedgeID num_incident_cut_edges(mt_kahypar_partitioned_hypergraph_t p, HypernodeID hn) { + return switch_phg<HyperedgeID, Throwing>(p, [=](const auto& phg) { + if constexpr (Throwing) { + check_hypernode_is_valid(phg, hn); + } + return phg.numIncidentCutHyperedges(hn); + }); +} + +template<bool Throwing> +PartitionID connectivity(mt_kahypar_partitioned_hypergraph_t p, HyperedgeID he) { + return switch_phg<PartitionID, Throwing>(p, [=](const auto& phg) { + if constexpr (Throwing) { + check_hyperedge_is_valid(phg, he); + } + return phg.connectivity(he); + }); +} + +template<bool Throwing> +HypernodeID num_pins_in_block(mt_kahypar_partitioned_hypergraph_t p, HyperedgeID he, PartitionID block) { + return switch_phg<HypernodeID, Throwing>(p, [=](const auto& phg) { + if constexpr (Throwing) { + check_hyperedge_is_valid(phg, he); + check_block_is_valid(phg, block); + } + return phg.pinCountInPart(he, block); + }); +} + +template<bool Throwing> +double imbalance(mt_kahypar_partitioned_hypergraph_t p, const Context& context) { + return switch_phg<double, Throwing>(p, [&](const auto& phg) { + if (Throwing && context.partition.k != phg.k()) { + std::stringstream ss; + ss << "Mismatched number of blocks: the context specifies " << context.partition.k + << " blocks, but the 
partition has " << phg.k() << " blocks"; + throw InvalidInputException(ss.str()); + } + Context c(context); + c.setupPartWeights(phg.totalWeight()); + return metrics::imbalance(phg, c); + }); +} + +template<bool Throwing> +HyperedgeWeight cut(mt_kahypar_partitioned_hypergraph_t p) { + return switch_phg<HyperedgeWeight, Throwing>(p, [&](const auto& phg) { + return metrics::quality(phg, Objective::cut); + }); +} + +template<bool Throwing> +HyperedgeWeight km1(mt_kahypar_partitioned_hypergraph_t p) { + return switch_phg<HyperedgeWeight, Throwing>(p, [&](const auto& phg) { + return metrics::quality(phg, Objective::km1); + }); +} + +template<bool Throwing> +HyperedgeWeight soed(mt_kahypar_partitioned_hypergraph_t p) { + return switch_phg<HyperedgeWeight, Throwing>(p, [&](const auto& phg) { + return metrics::quality(phg, Objective::soed); + }); +} + +} // namespace lib diff --git a/include/lib_helper_functions.h b/include/lib_helper_functions.h new file mode 100644 index 000000000..54b764778 --- /dev/null +++ b/include/lib_helper_functions.h @@ -0,0 +1,472 @@ +/******************************************************************************* + * MIT License + * + * This file is part of Mt-KaHyPar. + * + * Copyright (C) 2023 Tobias Heuer + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ ******************************************************************************/ + +#pragma once + +#include <limits> +#include <sstream> +#include <string> + +#include "mtkahypartypes.h" + +#include "mt-kahypar/definitions.h" +#include "mt-kahypar/partition/context.h" +#include "mt-kahypar/partition/conversion.h" +#include "mt-kahypar/partition/partitioner_facade.h" +#include "mt-kahypar/partition/mapping/target_graph.h" +#include "mt-kahypar/partition/metrics.h" +#include "mt-kahypar/partition/registries/registry.h" +#include "mt-kahypar/io/hypergraph_factory.h" +#include "mt-kahypar/io/hypergraph_io.h" +#include "mt-kahypar/utils/cast.h" +#include "mt-kahypar/utils/exception.h" +#include "mt-kahypar/io/command_line_options.h" +#include "mt-kahypar/io/presets.h" + + +using namespace mt_kahypar; + +namespace lib { + using StaticGraph = typename StaticGraphTypeTraits::Hypergraph; + using DynamicGraph = typename DynamicGraphTypeTraits::Hypergraph; + using StaticHypergraph = typename StaticHypergraphTypeTraits::Hypergraph; + using DynamicHypergraph = typename DynamicHypergraphTypeTraits::Hypergraph; + + using StaticPartitionedGraph = typename StaticGraphTypeTraits::PartitionedHypergraph; + using DynamicPartitionedGraph = typename DynamicGraphTypeTraits::PartitionedHypergraph; + using StaticPartitionedHypergraph = typename StaticHypergraphTypeTraits::PartitionedHypergraph; + using DynamicPartitionedHypergraph = typename DynamicHypergraphTypeTraits::PartitionedHypergraph; + using SparsePartitionedHypergraph = typename LargeKHypergraphTypeTraits::PartitionedHypergraph; + + using StaticHypergraphFactory = typename ds::StaticHypergraph::Factory; + using DynamicHypergraphFactory = typename ds::DynamicHypergraph::Factory; + using StaticGraphFactory = typename ds::StaticGraph::Factory; + using DynamicGraphFactory = typename ds::DynamicGraph::Factory; + + +// ####################### General Helper Functions ####################### + +void initialize(const size_t num_threads, const bool interleaved_allocations, const bool print_warnings) { + size_t P = num_threads; + #ifndef KAHYPAR_DISABLE_HWLOC + size_t num_available_cpus = HardwareTopology::instance().num_cpus(); + if ( num_available_cpus < num_threads ) { + P = num_available_cpus; + if (print_warnings) { + WARNING("There are currently only" << num_available_cpus << "CPUs available." 
+ << "Setting number of threads from" << num_threads << "to" << num_available_cpus); + } + } + #endif + + // Initialize TBB task arenas on numa nodes + TBBInitializer::instance(P); + + #ifndef KAHYPAR_DISABLE_HWLOC + if ( interleaved_allocations ) { + // We set the membind policy to interleaved allocations in order to + // distribute allocations evenly across NUMA nodes + hwloc_cpuset_t cpuset = TBBInitializer::instance().used_cpuset(); + parallel::HardwareTopology<>::instance().activate_interleaved_membind_policy(cpuset); + hwloc_bitmap_free(cpuset); + } + #else + unused(interleaved_allocations); + #endif + + register_algorithms_and_policies(); +} + +bool is_compatible(mt_kahypar_hypergraph_t hypergraph, mt_kahypar_preset_type_t preset) { + switch ( preset ) { + case DEFAULT: + case QUALITY: + case DETERMINISTIC: + case LARGE_K: + return hypergraph.type == STATIC_GRAPH || hypergraph.type == STATIC_HYPERGRAPH; + case HIGHEST_QUALITY: + return hypergraph.type == DYNAMIC_GRAPH || hypergraph.type == DYNAMIC_HYPERGRAPH; + } + return false; +} + +bool is_compatible(mt_kahypar_partitioned_hypergraph_t partitioned_hg, mt_kahypar_preset_type_t preset) { + switch ( preset ) { + case DEFAULT: + case QUALITY: + case DETERMINISTIC: + return partitioned_hg.type == MULTILEVEL_GRAPH_PARTITIONING || + partitioned_hg.type == MULTILEVEL_HYPERGRAPH_PARTITIONING; + case LARGE_K: + return partitioned_hg.type == MULTILEVEL_GRAPH_PARTITIONING || + partitioned_hg.type == LARGE_K_PARTITIONING; + case HIGHEST_QUALITY: + return partitioned_hg.type == N_LEVEL_GRAPH_PARTITIONING || + partitioned_hg.type == N_LEVEL_HYPERGRAPH_PARTITIONING; + } + return false; +} + +void check_if_all_relevant_parameters_are_set(Context& context) { + bool success = true; + auto check_parameter = [&](bool is_uninitialized, const char* warning_msg) { + if (is_uninitialized) { + success = false; + if (context.partition.verbose_output) { + WARNING(warning_msg); + } + } + }; + + check_parameter(context.partition.preset_type == PresetType::UNDEFINED, "Preset type not specified."); + check_parameter(context.partition.k == std::numeric_limits::max(), "Number of blocks not specified."); + check_parameter(context.partition.epsilon == std::numeric_limits::max(), "Imbalance not specified."); + check_parameter(context.partition.objective == Objective::UNDEFINED, "Objective function not specified."); + if (!success) { + throw InvalidInputException("A required context parameter is not set. 
Required are: preset type, k, epsilon, objective"); + } +} + +Context context_from_file(const char* ini_file_name) { + Context context(false); + parseIniToContext(context, ini_file_name, true); + return context; +} + +Context context_from_preset(PresetType preset) { + Context context(false); + auto preset_option_list = loadPreset(preset); + presetToContext(context, preset_option_list, true); + return context; +} + +void prepare_context(Context& context) { + context.shared_memory.original_num_threads = mt_kahypar::TBBInitializer::instance().total_number_of_threads(); + context.shared_memory.num_threads = mt_kahypar::TBBInitializer::instance().total_number_of_threads(); + context.utility_id = mt_kahypar::utils::Utilities::instance().registerNewUtilityObjects(); + + context.partition.perfect_balance_part_weights.clear(); + if ( !context.partition.use_individual_part_weights ) { + context.partition.max_part_weights.clear(); + } +} + +InstanceType get_instance_type(mt_kahypar_hypergraph_t hypergraph) { + switch ( hypergraph.type ) { + case STATIC_GRAPH: + case DYNAMIC_GRAPH: + return InstanceType::graph; + case STATIC_HYPERGRAPH: + case DYNAMIC_HYPERGRAPH: + return InstanceType::hypergraph; + case NULLPTR_HYPERGRAPH: + return InstanceType::UNDEFINED; + } + return InstanceType::UNDEFINED; +} + +InstanceType get_instance_type(mt_kahypar_partitioned_hypergraph_t partitioned_hg) { + switch ( partitioned_hg.type ) { + case MULTILEVEL_GRAPH_PARTITIONING: + case N_LEVEL_GRAPH_PARTITIONING: + return InstanceType::graph; + case MULTILEVEL_HYPERGRAPH_PARTITIONING: + case N_LEVEL_HYPERGRAPH_PARTITIONING: + case LARGE_K_PARTITIONING: + return InstanceType::hypergraph; + case NULLPTR_PARTITION: + return InstanceType::UNDEFINED; + } + return InstanceType::UNDEFINED; +} + +mt_kahypar_preset_type_t get_preset_c_type(const PresetType preset) { + switch ( preset ) { + case PresetType::default_preset: return DEFAULT; + case PresetType::quality: return QUALITY; + case PresetType::highest_quality: return HIGHEST_QUALITY; + case PresetType::deterministic: return DETERMINISTIC; + case PresetType::large_k: return LARGE_K; + case PresetType::UNDEFINED: return DEFAULT; + } + return DEFAULT; +} + +std::string incompatibility_description(mt_kahypar_hypergraph_t hypergraph) { + std::stringstream ss; + switch ( hypergraph.type ) { + case STATIC_GRAPH: + ss << "The hypergraph uses the static graph data structure which can only be used " + << "in combination with the following presets: " + << "DEFAULT, QUALITY, DETERMINISTIC and LARGE_K"; break; + case DYNAMIC_GRAPH: + ss << "The hypergraph uses the dynamic graph data structure which can only be used " + << "in combination with the following preset: " + << "HIGHEST_QUALITY"; break; + case STATIC_HYPERGRAPH: + ss << "The hypergraph uses the static hypergraph data structure which can only be used " + << "in combination with the following presets: " + << "DEFAULT, QUALITY, DETERMINISTIC and LARGE_K"; break; + case DYNAMIC_HYPERGRAPH: + ss << "The hypergraph uses the dynamic hypergraph data structure which can only be used " + << "in combination with the following preset: " + << "HIGHEST_QUALITY"; break; + case NULLPTR_HYPERGRAPH: + ss << "The hypergraph holds a nullptr. 
" + << "Did you forgot to construct or load a hypergraph?"; break; + } + return ss.str(); +} + +void check_compatibility(mt_kahypar_hypergraph_t hypergraph, + mt_kahypar_preset_type_t preset) { + if ( !is_compatible(hypergraph, preset) ) { + throw UnsupportedOperationException(incompatibility_description(hypergraph)); + } +} + +std::string incompatibility_description(mt_kahypar_partitioned_hypergraph_t partitioned_hg) { + std::stringstream ss; + switch ( partitioned_hg.type ) { + case MULTILEVEL_GRAPH_PARTITIONING: + ss << "The partitioned hypergraph uses the data structures for multilevel graph partitioning " + << "which can be only used in combination with the following presets: " + << "DEFAULT, QUALITY, DETERMINISTIC, and LARGE_K"; break; + case N_LEVEL_GRAPH_PARTITIONING: + ss << "The partitioned hypergraph uses the data structures for n-level graph partitioning " + << "which can be only used in combination with the following preset: " + << "HIGHEST_QUALITY"; break; + case MULTILEVEL_HYPERGRAPH_PARTITIONING: + ss << "The partitioned hypergraph uses the data structures for multilevel hypergraph partitioning " + << "which can be only used in combination with the following presets: " + << "DEFAULT, QUALITY, and DETERMINISTIC"; break; + case N_LEVEL_HYPERGRAPH_PARTITIONING: + ss << "The partitioned hypergraph uses the data structures for n-level hypergraph partitioning " + << "which can be only used in combination with the following preset: " + << "HIGHEST_QUALITY"; break; + case LARGE_K_PARTITIONING: + ss << "The partitioned hypergraph uses the data structures for large k hypergraph partitioning " + << "which can be only used in combination with the following preset: " + << "LARGE_K"; break; + case NULLPTR_PARTITION: + ss << "The hypergraph holds a nullptr. 
" + << "Did you forgot to construct or load a hypergraph?"; break; + } + return ss.str(); +} + +void check_compatibility(mt_kahypar_partitioned_hypergraph_t partitioned_hg, + mt_kahypar_preset_type_t preset) { + if ( !is_compatible(partitioned_hg, preset) ) { + throw UnsupportedOperationException(incompatibility_description(partitioned_hg)); + } +} + +mt_kahypar_hypergraph_t hypergraph_from_file(const std::string& file_name, + const Context& context, + const InstanceType instance_type, + const FileFormat file_format) { + return io::readInputFile(file_name, context.partition.preset_type, instance_type, file_format, true); +} + +mt_kahypar_hypergraph_t create_hypergraph(const Context& context, + const mt_kahypar_hypernode_id_t num_vertices, + const mt_kahypar_hyperedge_id_t num_hyperedges, + const vec>& edge_vector, + const mt_kahypar_hyperedge_weight_t* hyperedge_weights, + const mt_kahypar_hypernode_weight_t* vertex_weights) { + switch ( context.partition.preset_type ) { + case PresetType::deterministic: + case PresetType::large_k: + case PresetType::default_preset: + case PresetType::quality: + return mt_kahypar_hypergraph_t { + reinterpret_cast(new ds::StaticHypergraph( + StaticHypergraphFactory::construct(num_vertices, num_hyperedges, + edge_vector, hyperedge_weights, vertex_weights, true))), STATIC_HYPERGRAPH }; + case PresetType::highest_quality: + return mt_kahypar_hypergraph_t { + reinterpret_cast(new ds::DynamicHypergraph( + DynamicHypergraphFactory::construct(num_vertices, num_hyperedges, + edge_vector, hyperedge_weights, vertex_weights, true))), DYNAMIC_HYPERGRAPH }; + case PresetType::UNDEFINED: + break; + } + throw InvalidParameterException("Invalid preset type."); +} + +mt_kahypar_hypergraph_t create_graph(const Context& context, + const mt_kahypar_hypernode_id_t num_vertices, + const mt_kahypar_hyperedge_id_t num_edges, + const vec>& edge_vector, + const mt_kahypar_hyperedge_weight_t* edge_weights, + const mt_kahypar_hypernode_weight_t* vertex_weights) { + switch ( context.partition.preset_type ) { + case PresetType::deterministic: + case PresetType::large_k: + case PresetType::default_preset: + case PresetType::quality: + return mt_kahypar_hypergraph_t { + reinterpret_cast(new ds::StaticGraph( + StaticGraphFactory::construct_from_graph_edges(num_vertices, num_edges, + edge_vector, edge_weights, vertex_weights, true))), STATIC_GRAPH }; + case PresetType::highest_quality: + return mt_kahypar_hypergraph_t { + reinterpret_cast(new ds::DynamicGraph( + DynamicGraphFactory::construct_from_graph_edges(num_vertices, num_edges, + edge_vector, edge_weights, vertex_weights, true))), DYNAMIC_GRAPH }; + case PresetType::UNDEFINED: + break; + } + throw InvalidParameterException("Invalid preset type."); +} + +template +mt_kahypar_partitioned_hypergraph_t create_partitioned_hypergraph(Hypergraph& hg, + const mt_kahypar_partition_id_t num_blocks, + const mt_kahypar_partition_id_t* partition) { + PartitionedHypergraph partitioned_hg(num_blocks, hg, parallel_tag_t { }); + const mt_kahypar::HypernodeID num_nodes = hg.initialNumNodes(); + tbb::parallel_for(ID(0), num_nodes, [&](const mt_kahypar::HypernodeID& hn) { + partitioned_hg.setOnlyNodePart(hn, partition[hn]); + }); + partitioned_hg.initializePartition(); + return mt_kahypar_partitioned_hypergraph_t { reinterpret_cast( + new PartitionedHypergraph(std::move(partitioned_hg))), PartitionedHypergraph::TYPE }; +} + +mt_kahypar_partitioned_hypergraph_t create_partitioned_hypergraph(mt_kahypar_hypergraph_t hypergraph, + const Context& context, + 
const mt_kahypar_partition_id_t num_blocks, + const mt_kahypar_partition_id_t* partition) { + if ( hypergraph.type == STATIC_GRAPH || hypergraph.type == DYNAMIC_GRAPH ) { + switch ( context.partition.preset_type ) { + case PresetType::large_k: + case PresetType::deterministic: + case PresetType::default_preset: + case PresetType::quality: + ASSERT(hypergraph.type == STATIC_GRAPH); + return create_partitioned_hypergraph<StaticPartitionedGraph>( + utils::cast<StaticGraph>(hypergraph), num_blocks, partition); + case PresetType::highest_quality: + ASSERT(hypergraph.type == DYNAMIC_GRAPH); + return create_partitioned_hypergraph<DynamicPartitionedGraph>( + utils::cast<DynamicGraph>(hypergraph), num_blocks, partition); + case PresetType::UNDEFINED: break; + } + } else { + switch ( context.partition.preset_type ) { + case PresetType::large_k: + ASSERT(hypergraph.type == STATIC_HYPERGRAPH); + return create_partitioned_hypergraph<SparsePartitionedHypergraph>( + utils::cast<StaticHypergraph>(hypergraph), num_blocks, partition); + case PresetType::deterministic: + case PresetType::default_preset: + case PresetType::quality: + ASSERT(hypergraph.type == STATIC_HYPERGRAPH); + return create_partitioned_hypergraph<StaticPartitionedHypergraph>( + utils::cast<StaticHypergraph>(hypergraph), num_blocks, partition); + case PresetType::highest_quality: + ASSERT(hypergraph.type == DYNAMIC_HYPERGRAPH); + return create_partitioned_hypergraph<DynamicPartitionedHypergraph>( + utils::cast<DynamicHypergraph>(hypergraph), num_blocks, partition); + case PresetType::UNDEFINED: break; + } + } + throw InvalidParameterException("Invalid preset type."); +} + +void set_individual_block_weights(Context& context, + const mt_kahypar_partition_id_t num_blocks, + const mt_kahypar_hypernode_weight_t* block_weights) { + context.partition.use_individual_part_weights = true; + context.partition.max_part_weights.assign(num_blocks, 0); + for ( mt_kahypar_partition_id_t i = 0; i < num_blocks; ++i ) { + context.partition.max_part_weights[i] = block_weights[i]; + } +} + + +// ####################### Partitioning ####################### + +mt_kahypar_partitioned_hypergraph_t partition_impl(mt_kahypar_hypergraph_t hg, Context& context, TargetGraph* target_graph) { + check_compatibility(hg, get_preset_c_type(context.partition.preset_type)); + check_if_all_relevant_parameters_are_set(context); + context.partition.instance_type = get_instance_type(hg); + context.partition.partition_type = to_partition_c_type(context.partition.preset_type, context.partition.instance_type); + prepare_context(context); + context.partition.num_vcycles = 0; + return PartitionerFacade::partition(hg, context, target_graph); +} + +mt_kahypar_partitioned_hypergraph_t partition(mt_kahypar_hypergraph_t hg, const Context& context) { + Context partition_context(context); + return partition_impl(hg, partition_context, nullptr); +} + +mt_kahypar_partitioned_hypergraph_t map(mt_kahypar_hypergraph_t hg, TargetGraph& target_graph, const Context& context) { + if (static_cast<PartitionID>(target_graph.graph().initialNumNodes()) != context.partition.k) { + std::stringstream ss; + ss << "Mismatched number of blocks: the context specifies " << context.partition.k + << " blocks, but the target graph has " << target_graph.graph().initialNumNodes() << " blocks"; + throw InvalidInputException(ss.str()); + } + Context partition_context(context); + partition_context.partition.objective = Objective::steiner_tree; + return partition_impl(hg, partition_context, &target_graph); +} + + +// ####################### V-Cycles ####################### + +void improve_impl(mt_kahypar_partitioned_hypergraph_t phg, + Context& context, + const size_t num_vcycles, + TargetGraph* target_graph) { + check_compatibility(phg, 
get_preset_c_type(context.partition.preset_type)); + check_if_all_relevant_parameters_are_set(context); + context.partition.instance_type = get_instance_type(phg); + context.partition.partition_type = to_partition_c_type(context.partition.preset_type, context.partition.instance_type); + prepare_context(context); + context.partition.num_vcycles = num_vcycles; + PartitionerFacade::improve(phg, context, target_graph); +} + +void improve(mt_kahypar_partitioned_hypergraph_t phg, const Context& context, const size_t num_vcycles) { + Context partition_context(context); + improve_impl(phg, partition_context, num_vcycles, nullptr); +} + +void improve_mapping(mt_kahypar_partitioned_hypergraph_t phg, + TargetGraph& target_graph, + const Context& context, + const size_t num_vcycles) { + Context partition_context(context); + partition_context.partition.objective = Objective::steiner_tree; + improve_impl(phg, partition_context, num_vcycles, &target_graph); +} + +} // namespace lib diff --git a/include/mtkahypar.h b/include/mtkahypar.h index f5725bda0..283172a78 100644 --- a/include/mtkahypar.h +++ b/include/mtkahypar.h @@ -4,6 +4,7 @@ * This file is part of Mt-KaHyPar. * * Copyright (C) 2020 Tobias Heuer + * Copyright (C) 2024 Nikolai Maas * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -29,6 +30,10 @@ #include <stddef.h> +#ifndef __cplusplus +#include <stdbool.h> +#endif + #include "mtkahypartypes.h" #ifdef __cplusplus @@ -39,11 +44,6 @@ extern "C" { // ####################### Setup Context ####################### -/** - * Creates a new empty partitioning context object. - */ -MT_KAHYPAR_API mt_kahypar_context_t* mt_kahypar_context_new(); - /** * Deletes the partitioning context object. */ @@ -52,29 +52,29 @@ MT_KAHYPAR_API void mt_kahypar_free_context(mt_kahypar_context_t* context); /** * Loads a partitioning context from a configuration file. */ -MT_KAHYPAR_API void mt_kahypar_configure_context_from_file(mt_kahypar_context_t* context, - const char* ini_file_name); +MT_KAHYPAR_API mt_kahypar_context_t* mt_kahypar_context_from_file(const char* ini_file_name, + mt_kahypar_error_t* error); /** * Loads a partitioning context of a predefined preset type. - * Possible preset types are DETERMINISTIC (corresponds to Mt-KaHyPar-SDet), - * SPEED (corresponds to Mt-KaHyPar-D) and HIGH_QUALITY (corresponds to Mt-KaHyPar-D-F) + * + * See 'mt_kahypar_preset_type_t' for possible presets. */ -MT_KAHYPAR_API void mt_kahypar_load_preset(mt_kahypar_context_t* context, - const mt_kahypar_preset_type_t preset); +MT_KAHYPAR_API mt_kahypar_context_t* mt_kahypar_context_from_preset(const mt_kahypar_preset_type_t preset); /** * Sets a new value for a context parameter. * * Usage: - * mt_kahypar_set_context_parameter(context, OBJECTIVE, "km1") // sets the objective function to the connectivity metric + * mt_kahypar_set_context_parameter(context, OBJECTIVE, "km1", &error) // sets the objective function to the connectivity metric * - * \return exit code zero if the corresponding parameter is successfully set to the value. Otherwise, it returns - * 1 for an unknown parameter type, 2 for an integer conversion error or 3 for an unknown value type. + * \return success (zero) if the corresponding parameter is successfully set to the value. Otherwise, + * returns INVALID_PARAMETER and error is set accordingly. 
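+ * + * A usage sketch under the new error handling (SUCCESS and the 'msg' field of mt_kahypar_error_t are used as in the README example): + * + *   mt_kahypar_error_t error{}; + *   if (mt_kahypar_set_context_parameter(context, VERBOSE, "1", &error) != SUCCESS) { + *     printf("%s\n", error.msg); + *   }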
*/ -MT_KAHYPAR_API int mt_kahypar_set_context_parameter(mt_kahypar_context_t* context, - const mt_kahypar_context_parameter_type_t type, - const char* value); +MT_KAHYPAR_API mt_kahypar_status_t mt_kahypar_set_context_parameter(mt_kahypar_context_t* context, + const mt_kahypar_context_parameter_type_t type, + const char* value, + mt_kahypar_error_t* error); /** * Sets all required parameters for a partitioning call. @@ -84,6 +84,23 @@ MT_KAHYPAR_API void mt_kahypar_set_partitioning_parameters(mt_kahypar_context_t* const double epsilon, const mt_kahypar_objective_t objective); +MT_KAHYPAR_API mt_kahypar_preset_type_t mt_kahypar_get_preset(const mt_kahypar_context_t* context); + +/** + * Get number of blocks. Result is unspecified if not previously initialized. + */ +MT_KAHYPAR_API mt_kahypar_partition_id_t mt_kahypar_get_num_blocks(const mt_kahypar_context_t* context); + +/** + * Get imbalance parameter epsilon. Result is unspecified if not previously initialized. + */ +MT_KAHYPAR_API double mt_kahypar_get_epsilon(const mt_kahypar_context_t* context); + +/** + * Get objective function. Result is unspecified if not previously initialized. + */ +MT_KAHYPAR_API mt_kahypar_objective_t mt_kahypar_get_objective(const mt_kahypar_context_t* context); + /** * Initializes the random number generator with the given seed value (not thread-safe). @@ -94,6 +111,8 @@ MT_KAHYPAR_API void mt_kahypar_set_seed(const size_t seed); * Sets individual target block weights for each block of the partition. * A balanced partition then satisfies that the weight of each block is smaller or equal than the * defined target block weight for the corresponding block. + * + * Note that the context is invalid if the number of block weights is not equal to the NUM_BLOCKS parameter. */ MT_KAHYPAR_API void mt_kahypar_set_individual_target_block_weights(mt_kahypar_context_t* context, const mt_kahypar_partition_id_t num_blocks, @@ -102,8 +121,21 @@ MT_KAHYPAR_API void mt_kahypar_set_individual_target_block_weights(mt_kahypar_co // ####################### Thread Pool Initialization ####################### +/** + * Must be called once for global initialization, before trying to create or partition any (hyper)graph. + * + * Note: if 'num_threads' is larger than the number of actually available CPUs, only a reduced number of threads will be used. + */ MT_KAHYPAR_API void mt_kahypar_initialize(const size_t num_threads, const bool interleaved_allocations); + +// ####################### Error Handling ####################### + +/** + * Frees the content of the error. + */ +MT_KAHYPAR_API void mt_kahypar_free_error_content(mt_kahypar_error_t* error); + // ####################### Load/Construct Hypergraph ####################### /** @@ -114,14 +146,17 @@ MT_KAHYPAR_API void mt_kahypar_initialize(const size_t num_threads, const bool i * Make sure that you partition the hypergraph with the same configuration as it is loaded. */ MT_KAHYPAR_API mt_kahypar_hypergraph_t mt_kahypar_read_hypergraph_from_file(const char* file_name, - const mt_kahypar_preset_type_t preset, - const mt_kahypar_file_format_type_t file_format); + const mt_kahypar_context_t* context, + const mt_kahypar_file_format_type_t file_format, + mt_kahypar_error_t* error); /** * Reads a target graph in Metis file format. The target graph can be used in the * 'mt_kahypar_map' function to map a (hyper)graph onto it. 
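+ * + * Sketch of the intended mapping flow (signatures as declared in this header; the file path is illustrative): + * + *   mt_kahypar_target_graph_t* target_graph = + *     mt_kahypar_read_target_graph_from_file("path/to/target/graph", context, &error); + *   mt_kahypar_partitioned_hypergraph_t mapped_hg = + *     mt_kahypar_map(hypergraph, target_graph, context, &error);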
*/ -MT_KAHYPAR_API mt_kahypar_target_graph_t* mt_kahypar_read_target_graph_from_file(const char* file_name); +MT_KAHYPAR_API mt_kahypar_target_graph_t* mt_kahypar_read_target_graph_from_file(const char* file_name, + const mt_kahypar_context_t* context, + mt_kahypar_error_t* error); /** * Constructs a hypergraph from a given adjacency array that specifies the hyperedges. * @@ -134,13 +169,14 @@ MT_KAHYPAR_API mt_kahypar_target_graph_t* mt_kahypar_read_target_graph_from_file * \note For unweighted hypergraphs, you can pass nullptr to either hyperedge_weights or vertex_weights. * \note After construction, the arguments of this function are no longer needed and can be deleted. */ -MT_KAHYPAR_API mt_kahypar_hypergraph_t mt_kahypar_create_hypergraph(mt_kahypar_preset_type_t preset, +MT_KAHYPAR_API mt_kahypar_hypergraph_t mt_kahypar_create_hypergraph(const mt_kahypar_context_t* context, const mt_kahypar_hypernode_id_t num_vertices, const mt_kahypar_hyperedge_id_t num_hyperedges, const size_t* hyperedge_indices, const mt_kahypar_hyperedge_id_t* hyperedges, const mt_kahypar_hyperedge_weight_t* hyperedge_weights, - const mt_kahypar_hypernode_weight_t* vertex_weights); + const mt_kahypar_hypernode_weight_t* vertex_weights, + mt_kahypar_error_t* error); /** * Constructs a graph from a given edge list vector. * @@ -152,12 +188,13 @@ MT_KAHYPAR_API mt_kahypar_hypergraph_t mt_kahypar_create_hypergraph(mt_kahypar_p * \note For unweighted graphs, you can pass nullptr to either hyperedge_weights or vertex_weights. * \note After construction, the arguments of this function are no longer needed and can be deleted. */ -MT_KAHYPAR_API mt_kahypar_hypergraph_t mt_kahypar_create_graph(const mt_kahypar_preset_type_t preset, +MT_KAHYPAR_API mt_kahypar_hypergraph_t mt_kahypar_create_graph(const mt_kahypar_context_t* context, const mt_kahypar_hypernode_id_t num_vertices, const mt_kahypar_hyperedge_id_t num_edges, const mt_kahypar_hypernode_id_t* edges, const mt_kahypar_hyperedge_weight_t* edge_weights, - const mt_kahypar_hypernode_weight_t* vertex_weights); + const mt_kahypar_hypernode_weight_t* vertex_weights, + mt_kahypar_error_t* error); /** * Constructs a target graph from a given edge list vector. The target graph can be used in the * 'mt_kahypar_map' function to map a (hyper)graph onto it. * @@ -169,10 +206,12 @@ MT_KAHYPAR_API mt_kahypar_hypergraph_t mt_kahypar_create_graph(const mt_kahypar_ * \note For unweighted graphs, you can pass nullptr as edge_weights. * \note After construction, the arguments of this function are no longer needed and can be deleted. */ -MT_KAHYPAR_API mt_kahypar_target_graph_t* mt_kahypar_create_target_graph(const mt_kahypar_hypernode_id_t num_vertices, +MT_KAHYPAR_API mt_kahypar_target_graph_t* mt_kahypar_create_target_graph(const mt_kahypar_context_t* context, + const mt_kahypar_hypernode_id_t num_vertices, const mt_kahypar_hyperedge_id_t num_edges, const mt_kahypar_hypernode_id_t* edges, - const mt_kahypar_hyperedge_weight_t* edge_weights); + const mt_kahypar_hyperedge_weight_t* edge_weights, + mt_kahypar_error_t* error); /** * Deletes the (hyper)graph object. */ MT_KAHYPAR_API void mt_kahypar_free_hypergraph(mt_kahypar_hypergraph_t hypergraph); /** * Deletes the target graph object. */ MT_KAHYPAR_API void mt_kahypar_free_target_graph(mt_kahypar_target_graph_t* target_graph); +// ####################### Properties of Hypergraph ####################### + /** * Returns the number of nodes of the (hyper)graph. 
*/ @@ -191,6 +232,8 @@ MT_KAHYPAR_API mt_kahypar_hypernode_id_t mt_kahypar_num_hypernodes(mt_kahypar_hy /** * Returns the number of (hyper)edges of the (hyper)graph. + * + * Note that for graphs, this returns the number of directed edges (i.e., twice the number of undirected edges). */ MT_KAHYPAR_API mt_kahypar_hyperedge_id_t mt_kahypar_num_hyperedges(mt_kahypar_hypergraph_t hypergraph); @@ -204,6 +247,67 @@ MT_KAHYPAR_API mt_kahypar_hypernode_id_t mt_kahypar_num_pins(mt_kahypar_hypergra */ MT_KAHYPAR_API mt_kahypar_hypernode_id_t mt_kahypar_hypergraph_weight(mt_kahypar_hypergraph_t hypergraph); +/** + * Returns the degree of the corresponding node. + */ +MT_KAHYPAR_API mt_kahypar_hyperedge_id_t mt_kahypar_hypernode_degree(mt_kahypar_hypergraph_t hypergraph, mt_kahypar_hypernode_id_t node); + +/** + * Returns the weight of the corresponding node. + */ +MT_KAHYPAR_API mt_kahypar_hypernode_weight_t mt_kahypar_hypernode_weight(mt_kahypar_hypergraph_t hypergraph, mt_kahypar_hypernode_id_t node); + +/** + * Returns the size of the corresponding hyperedge. + * + * Note that for graphs, the size is always 2. + */ +MT_KAHYPAR_API mt_kahypar_hypernode_id_t mt_kahypar_hyperedge_size(mt_kahypar_hypergraph_t hypergraph, mt_kahypar_hyperedge_id_t edge); + +/** + * Returns the weight of the corresponding edge. + */ +MT_KAHYPAR_API mt_kahypar_hyperedge_weight_t mt_kahypar_hyperedge_weight(mt_kahypar_hypergraph_t hypergraph, mt_kahypar_hyperedge_id_t edge); + +/** + * Writes the IDs of the hyperedges that are incident to the corresponding node into the provided buffer. + * The size of the provided array must be at least 'mt_kahypar_hypernode_degree(node)' + * (note that 'mt_kahypar_num_hyperedges' also provides an upper bound). + * + * \return the number of returned hyperedges + */ +MT_KAHYPAR_API mt_kahypar_hyperedge_id_t mt_kahypar_get_incident_hyperedges(mt_kahypar_hypergraph_t hypergraph, + mt_kahypar_hypernode_id_t node, + mt_kahypar_hyperedge_id_t* edge_buffer); + +/** + * Writes the IDs of the pins (i.e., hypernodes) in the corresponding hyperedge into the provided buffer. + * The size of the provided array must be at least 'mt_kahypar_hyperedge_size(edge)' + * (note that 'mt_kahypar_num_hypernodes' also provides an upper bound). + * + * \return the number of returned pins + */ +MT_KAHYPAR_API mt_kahypar_hypernode_id_t mt_kahypar_get_hyperedge_pins(mt_kahypar_hypergraph_t hypergraph, + mt_kahypar_hyperedge_id_t edge, + mt_kahypar_hypernode_id_t* pin_buffer); + +/** + * Returns whether 'hypergraph' is a graph (i.e., not a hypergraph). + */ +MT_KAHYPAR_API bool mt_kahypar_is_graph(mt_kahypar_hypergraph_t hypergraph); + +/** + * Source of the corresponding graph edge. + * Returns 0 if 'graph' is not a graph but a hypergraph. + */ +MT_KAHYPAR_API mt_kahypar_hypernode_id_t mt_kahypar_edge_source(mt_kahypar_hypergraph_t graph, mt_kahypar_hyperedge_id_t edge); + +/** + * Target of the corresponding graph edge. + * Returns 0 if 'graph' is not a graph but a hypergraph. + */ +MT_KAHYPAR_API mt_kahypar_hypernode_id_t mt_kahypar_edge_target(mt_kahypar_hypergraph_t graph, mt_kahypar_hyperedge_id_t edge); + // ####################### Fixed Vertices ####################### /** @@ -211,30 +315,46 @@ MT_KAHYPAR_API mt_kahypar_hypernode_id_t mt_kahypar_hypergraph_weight(mt_kahypar * The array should contain n entries (n = number of nodes). Each entry contains either the fixed vertex * block ID of the corresponding node or -1 if the node is not fixed to a block. 
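To make the directed-edge note above concrete, here is a minimal sketch (not part of the patch) that builds a single-edge graph and checks what the new query functions report; `<mtkahypar.h>` is the assumed install path of the header.

```cpp
#include <cassert>

#include <mtkahypar.h>

int main() {
  mt_kahypar_error_t error{};
  mt_kahypar_initialize(2, false);
  mt_kahypar_context_t* context = mt_kahypar_context_from_preset(DEFAULT);

  // One undirected edge {0, 1}; nullptr means unit node/edge weights.
  mt_kahypar_hypernode_id_t edge_list[2] = {0, 1};
  mt_kahypar_hypergraph_t g = mt_kahypar_create_graph(
      context, 2 /* num_vertices */, 1 /* num_edges */,
      edge_list, nullptr, nullptr, &error);
  if (g.hypergraph == nullptr) { return 1; }

  // Graphs are stored as directed graphs internally: the one undirected
  // edge is reported as the two directed edges (0,1) and (1,0).
  assert(mt_kahypar_is_graph(g));
  assert(mt_kahypar_num_hyperedges(g) == 2);
  assert(mt_kahypar_hyperedge_size(g, 0) == 2);  // edge size is always 2 for graphs

  mt_kahypar_free_hypergraph(g);
  mt_kahypar_free_context(context);
  return 0;
}
```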
*/ -MT_KAHYPAR_API void mt_kahypar_add_fixed_vertices(mt_kahypar_hypergraph_t hypergraph, - mt_kahypar_partition_id_t* fixed_vertices, - mt_kahypar_partition_id_t num_blocks); +MT_KAHYPAR_API mt_kahypar_status_t mt_kahypar_add_fixed_vertices(mt_kahypar_hypergraph_t hypergraph, + const mt_kahypar_partition_id_t* fixed_vertices, + mt_kahypar_partition_id_t num_blocks, + mt_kahypar_error_t* error); /** * Reads fixed vertices from a file and stores them in the array to which 'fixed_vertices' points to. + * The array must be of size 'num_nodes'. If the number of entries in the file is different from 'num_nodes', + * an error is returned. */ -MT_KAHYPAR_API void mt_kahypar_read_fixed_vertices_from_file(const char* file_name, - mt_kahypar_partition_id_t* fixed_vertices); +MT_KAHYPAR_API mt_kahypar_status_t mt_kahypar_read_fixed_vertices_from_file(const char* file_name, + mt_kahypar_hypernode_id_t num_nodes, + mt_kahypar_partition_id_t* fixed_vertices, + mt_kahypar_error_t* error); /** * Adds fixed vertices to the (hyper)graph as specified in the fixed vertex file (expected in hMetis fix file format). * The file should contain n lines (n = number of nodes). Each line contains either the fixed vertex * block ID of the corresponding node or -1 if the node is not fixed to a block. */ -MT_KAHYPAR_API void mt_kahypar_add_fixed_vertices_from_file(mt_kahypar_hypergraph_t hypergraph, - const char* file_name, - mt_kahypar_partition_id_t num_blocks); +MT_KAHYPAR_API mt_kahypar_status_t mt_kahypar_add_fixed_vertices_from_file(mt_kahypar_hypergraph_t hypergraph, + const char* file_name, + mt_kahypar_partition_id_t num_blocks, + mt_kahypar_error_t* error); /** * Removes all fixed vertices from the hypergraph. */ MT_KAHYPAR_API void mt_kahypar_remove_fixed_vertices(mt_kahypar_hypergraph_t hypergraph); +/** + * Whether the corresponding node is a fixed vertex. + */ +MT_KAHYPAR_API bool mt_kahypar_is_fixed_vertex(mt_kahypar_hypergraph_t hypergraph, mt_kahypar_hypernode_id_t node); + +/** + * Block to which the node is fixed (-1 if not fixed). + */ +MT_KAHYPAR_API mt_kahypar_partition_id_t mt_kahypar_fixed_vertex_block(mt_kahypar_hypergraph_t hypergraph, mt_kahypar_hypernode_id_t node); + // ####################### Partition ####################### /** @@ -251,7 +371,8 @@ MT_KAHYPAR_API bool mt_kahypar_check_compatibility(mt_kahypar_hypergraph_t hyper * or mt_kahypar_set_partitioning_parameters(...). */ MT_KAHYPAR_API mt_kahypar_partitioned_hypergraph_t mt_kahypar_partition(mt_kahypar_hypergraph_t hypergraph, - mt_kahypar_context_t* context); + const mt_kahypar_context_t* context, + mt_kahypar_error_t* error); /** * Maps a (hyper)graph onto a target graph with the configuration specified in the partitioning context. @@ -270,7 +391,8 @@ MT_KAHYPAR_API mt_kahypar_partitioned_hypergraph_t mt_kahypar_partition(mt_kahyp */ MT_KAHYPAR_API mt_kahypar_partitioned_hypergraph_t mt_kahypar_map(mt_kahypar_hypergraph_t hypergraph, mt_kahypar_target_graph_t* target_graph, - mt_kahypar_context_t* context); + const mt_kahypar_context_t* context, + mt_kahypar_error_t* error); /** * Checks whether or not the given partitioned hypergraph can @@ -286,9 +408,10 @@ MT_KAHYPAR_API bool mt_kahypar_check_partition_compatibility(mt_kahypar_partitio * number of blocks of the given partition. * \note There is no guarantee that this call will find an improvement. 
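The new fixed-vertex queries above combine with the existing node iteration to inspect a fixed vertex assignment. A fragment (illustrative only; `hypergraph` is assumed to be a `mt_kahypar_hypergraph_t` to which fixed vertices were previously added):

```cpp
// Print the fixed-vertex status of every node.
for (mt_kahypar_hypernode_id_t node = 0;
     node < mt_kahypar_num_hypernodes(hypergraph); ++node) {
  if (mt_kahypar_is_fixed_vertex(hypergraph, node)) {
    std::cout << "Node " << node << " is fixed to block "
              << mt_kahypar_fixed_vertex_block(hypergraph, node) << std::endl;
  }
}
```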
*/ -MT_KAHYPAR_API void mt_kahypar_improve_partition(mt_kahypar_partitioned_hypergraph_t partitioned_hg, - mt_kahypar_context_t* context, - const size_t num_vcycles); +MT_KAHYPAR_API mt_kahypar_status_t mt_kahypar_improve_partition(mt_kahypar_partitioned_hypergraph_t partitioned_hg, + const mt_kahypar_context_t* context, + const size_t num_vcycles, + mt_kahypar_error_t* error); /** * Improves a given mapping (using the V-cycle technique). @@ -297,41 +420,83 @@ MT_KAHYPAR_API void mt_kahypar_improve_partition(mt_kahypar_partitioned_hypergra * number of blocks of the given partition. * \note There is no guarantee that this call will find an improvement. */ -MT_KAHYPAR_API void mt_kahypar_improve_mapping(mt_kahypar_partitioned_hypergraph_t partitioned_hg, - mt_kahypar_target_graph_t* target_graph, - mt_kahypar_context_t* context, - const size_t num_vcycles); +MT_KAHYPAR_API mt_kahypar_status_t mt_kahypar_improve_mapping(mt_kahypar_partitioned_hypergraph_t partitioned_hg, + mt_kahypar_target_graph_t* target_graph, + const mt_kahypar_context_t* context, + const size_t num_vcycles, + mt_kahypar_error_t* error); /** * Constructs a partitioned (hyper)graph out of the given partition. */ MT_KAHYPAR_API mt_kahypar_partitioned_hypergraph_t mt_kahypar_create_partitioned_hypergraph(mt_kahypar_hypergraph_t hypergraph, - const mt_kahypar_preset_type_t preset, + const mt_kahypar_context_t* context, const mt_kahypar_partition_id_t num_blocks, - const mt_kahypar_partition_id_t* partition); + const mt_kahypar_partition_id_t* partition, + mt_kahypar_error_t* error); /** * Constructs a partitioned (hyper)graph from a given partition file. */ MT_KAHYPAR_API mt_kahypar_partitioned_hypergraph_t mt_kahypar_read_partition_from_file(mt_kahypar_hypergraph_t hypergraph, - const mt_kahypar_preset_type_t preset, + const mt_kahypar_context_t* context, const mt_kahypar_partition_id_t num_blocks, - const char* partition_file); + const char* partition_file, + mt_kahypar_error_t* error); /** * Writes a partition to a file. */ -MT_KAHYPAR_API void mt_kahypar_write_partition_to_file(const mt_kahypar_partitioned_hypergraph_t partitioned_hg, - const char* partition_file); +MT_KAHYPAR_API mt_kahypar_status_t mt_kahypar_write_partition_to_file(const mt_kahypar_partitioned_hypergraph_t partitioned_hg, + const char* partition_file, + mt_kahypar_error_t* error); + +// ####################### Partitioning Results ####################### + +/** + * Number of blocks of the partition. + */ +MT_KAHYPAR_API mt_kahypar_partition_id_t mt_kahypar_num_blocks(const mt_kahypar_partitioned_hypergraph_t partitioned_hg); + +/** + * Weight of the corresponding block. + */ +MT_KAHYPAR_API mt_kahypar_hypernode_weight_t mt_kahypar_block_weight(const mt_kahypar_partitioned_hypergraph_t partitioned_hg, + const mt_kahypar_partition_id_t block); + +/** + * Block to which the corresponding hypernode is assigned. + */ +MT_KAHYPAR_API mt_kahypar_partition_id_t mt_kahypar_block_id(const mt_kahypar_partitioned_hypergraph_t partitioned_hg, + const mt_kahypar_hypernode_id_t node); + +/** + * Number of incident cut hyperedges of the corresponding node. + */ +MT_KAHYPAR_API mt_kahypar_hyperedge_id_t mt_kahypar_num_incident_cut_hyperedges(const mt_kahypar_partitioned_hypergraph_t partitioned_hg, + const mt_kahypar_hypernode_id_t node); + +/** + * Number of distinct blocks to which the pins of the corresponding hyperedge are assigned. 
+ */
+MT_KAHYPAR_API mt_kahypar_partition_id_t mt_kahypar_connectivity(const mt_kahypar_partitioned_hypergraph_t partitioned_hg,
+                                                                 const mt_kahypar_hyperedge_id_t edge);
+
+/**
+ * Number of pins assigned to the corresponding block in the given hyperedge.
+ */
+MT_KAHYPAR_API mt_kahypar_hypernode_id_t mt_kahypar_num_pins_in_block(const mt_kahypar_partitioned_hypergraph_t partitioned_hg,
+                                                                      const mt_kahypar_hyperedge_id_t edge,
+                                                                      const mt_kahypar_partition_id_t block);

 /**
- * Extracts a partition from a partitioned (hyper)graph.
+ * Extracts a partition from a partitioned (hyper)graph. The size of the provided array must be at least the number of nodes.
  */
 MT_KAHYPAR_API void mt_kahypar_get_partition(const mt_kahypar_partitioned_hypergraph_t partitioned_hg,
                                              mt_kahypar_partition_id_t* partition);

 /**
- * Extracts the weight of each block from a partition.
+ * Extracts the weight of each block from a partition. The size of the provided array must be at least the number of blocks.
  */
 MT_KAHYPAR_API void mt_kahypar_get_block_weights(const mt_kahypar_partitioned_hypergraph_t partitioned_hg,
                                                  mt_kahypar_hypernode_weight_t* block_weights);
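The per-edge queries in the new "Partitioning Results" section allow recomputing the km1 objective by hand, which is a useful sanity check: km1 is the sum over all hyperedges of (connectivity − 1) times the edge weight. A fragment (illustrative only; `hypergraph` and `partitioned_hg` as in the examples under `lib/examples`):

```cpp
// km1 = sum over all hyperedges e of w(e) * (lambda(e) - 1)
int km1 = 0;
for (mt_kahypar_hyperedge_id_t edge = 0;
     edge < mt_kahypar_num_hyperedges(hypergraph); ++edge) {
  km1 += mt_kahypar_hyperedge_weight(hypergraph, edge) *
         (mt_kahypar_connectivity(partitioned_hg, edge) - 1);
}
assert(km1 == mt_kahypar_km1(partitioned_hg));  // matches the built-in metric
```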
diff --git a/include/mtkahypartypes.h b/include/mtkahypartypes.h
index e14afa53b..ddd9935d9 100644
--- a/include/mtkahypartypes.h
+++ b/include/mtkahypartypes.h
@@ -1,5 +1,7 @@
-#ifndef TYPEDEFS_H
-#define TYPEDEFS_H
+#ifndef MTKAHYPAR_TYPEDEFS_H
+#define MTKAHYPAR_TYPEDEFS_H
+
+#include <stddef.h>

 typedef enum {
   STATIC_GRAPH,
@@ -18,12 +20,44 @@ typedef enum {
   NULLPTR_PARTITION
 } mt_kahypar_partition_type_t;

+/**
+ * Either success or the type of the error.
+ */
+typedef enum {
+  SUCCESS = 0,
+  // input files are not found or an input is syntactically or semantically invalid
+  INVALID_INPUT,
+  // an algorithm parameter has an invalid value
+  INVALID_PARAMETER,
+  // the attempted operation is incompatible with the config,
+  // e.g. the hypergraph type is incompatible with the preset type
+  UNSUPPORTED_OPERATION,
+  // errors originating from the OS, e.g. out of memory or failed mmap
+  SYSTEM_ERROR,
+  OTHER_ERROR
+} mt_kahypar_status_t;
+
+/**
+ * Indicates whether an error occurred.
+ *
+ * If an error occurs, mt_kahypar_free_error_content needs to be called
+ * afterwards to free the space allocated for the error message.
+ */
+typedef struct {
+  // null-terminated error message
+  const char* msg;
+  // length of the error message without the null terminator
+  size_t msg_len;
+  // either success or the error type
+  mt_kahypar_status_t status;
+} mt_kahypar_error_t;
+
 struct mt_kahypar_context_s;
 typedef struct mt_kahypar_context_s mt_kahypar_context_t;

 struct mt_kahypar_target_graph_s;
 typedef struct mt_kahypar_target_graph_s mt_kahypar_target_graph_t;

-struct mt_kahypar_hypergraph_s;
+typedef struct mt_kahypar_hypergraph_s mt_kahypar_hypergraph_s;
 typedef struct {
   mt_kahypar_hypergraph_s* hypergraph;
   mt_kahypar_hypergraph_type_t type;
 } mt_kahypar_hypergraph_t;
@@ -34,7 +68,7 @@ typedef struct {
   mt_kahypar_hypergraph_type_t type;
 } mt_kahypar_hypergraph_const_t;

-struct mt_kahypar_partitioned_hypergraph_s;
+typedef struct mt_kahypar_partitioned_hypergraph_s mt_kahypar_partitioned_hypergraph_s;
 typedef struct {
   mt_kahypar_partitioned_hypergraph_s* partitioned_hg;
   mt_kahypar_partition_type_t type;
 } mt_kahypar_partitioned_hypergraph_t;
@@ -55,15 +89,15 @@ typedef int mt_kahypar_partition_id_t;
 /**
  * Configurable parameters of the partitioning context.
  */
 typedef enum {
-  // number of blocks of the partition
+  // number of blocks of the partition (integer)
   NUM_BLOCKS,
-  // imbalance factor
+  // imbalance factor (float)
   EPSILON,
-  // objective function (either 'cut' or 'km1')
+  // objective function (either 'cut', 'km1' or 'soed')
   OBJECTIVE,
-  // number of V-cycles
+  // number of V-cycles (integer)
   NUM_VCYCLES,
-  // disables or enables logging
+  // enables logging (bool: 1/0)
   VERBOSE
 } mt_kahypar_context_parameter_type_t;

@@ -80,17 +114,15 @@
 /**
  * Preset types for partitioning context.
  */
 typedef enum {
-  // deterministic partitioning mode (corresponds to Mt-KaHyPar-SDet)
+  // configuration for deterministic partitioning
   DETERMINISTIC,
-  // partitioning mode for partitioning a (hyper)graph into a large number of blocks
+  // configuration for partitioning (hyper)graphs into a large number of blocks (e.g. >= 1024 blocks)
   LARGE_K,
-  // computes good partitions very fast (corresponds to Mt-KaHyPar-D)
+  // computes good partitions very fast
   DEFAULT,
-  // extends default preset with flow-based refinement
-  // -> computes high-quality partitions (corresponds to Mt-KaHyPar-D-F)
+  // computes high-quality partitions (uses flow-based refinement)
   QUALITY,
-  // n-level code with flow-based refinement
-  // => highest quality configuration (corresponds to Mt-KaHyPar-Q-F)
+  // highest-quality configuration (uses n-level coarsening and flow-based refinement)
   HIGHEST_QUALITY
 } mt_kahypar_preset_type_t;

@@ -112,4 +144,4 @@
 # endif
 #endif

-#endif // TYPEDEFS_H
\ No newline at end of file
+#endif // MTKAHYPAR_TYPEDEFS_H
diff --git a/lib/examples/construct_and_read_graph.cc b/lib/examples/construct_and_read_graph.cc
new file mode 100644
index 000000000..49f8c3b8c
--- /dev/null
+++ b/lib/examples/construct_and_read_graph.cc
@@ -0,0 +1,123 @@
+#include <iostream>
+#include <memory>
+#include <thread>
+#include <vector>
+
+#include <mtkahypar.h>
+
+// Install library interface via 'sudo make install.mtkahypar' in build folder
+// Compile with: g++ -std=c++14 -DNDEBUG -O3 construct_and_read_graph.cc -o example -lmtkahypar
+int main(int argc, char* argv[]) {
+  mt_kahypar_error_t error{};
+
+  // Initialize thread pool
+  mt_kahypar_initialize(
+    std::thread::hardware_concurrency() /* use all available cores */,
+    true /* activate interleaved NUMA allocation policy */ );
+
+  // In the following, we construct a graph with 5 nodes and 6 edges
+  const mt_kahypar_hypernode_id_t num_nodes = 5;
+  const mt_kahypar_hyperedge_id_t num_edges = 6;
+
+  // We represent the edges of the graph as an edge list vector.
+  // Two consecutive node IDs in the edge list vector form
+  // an undirected edge in the graph.
+  std::unique_ptr<mt_kahypar_hypernode_id_t[]> edges =
+    std::make_unique<mt_kahypar_hypernode_id_t[]>(12);
+  edges[0] = 0;  edges[1] = 1;   // first edge connects node 0 and 1
+  edges[2] = 0;  edges[3] = 2;   // second edge connects node 0 and 2
+  edges[4] = 1;  edges[5] = 2;   // third edge connects node 1 and 2
+  edges[6] = 1;  edges[7] = 3;   // fourth edge connects node 1 and 3
+  edges[8] = 2;  edges[9] = 3;   // fifth edge connects node 2 and 3
+  edges[10] = 3; edges[11] = 4;  // sixth edge connects node 3 and 4
+
+  // Define node weights
+  std::unique_ptr<mt_kahypar_hypernode_weight_t[]> node_weights =
+    std::make_unique<mt_kahypar_hypernode_weight_t[]>(5);
+  node_weights[0] = 2; node_weights[1] = 1; node_weights[2] = 2;
+  node_weights[3] = 4; node_weights[4] = 1;
+
+  // Define edge weights
+  std::unique_ptr<mt_kahypar_hyperedge_weight_t[]> edge_weights =
+    std::make_unique<mt_kahypar_hyperedge_weight_t[]>(6);
+  edge_weights[0] = 1; edge_weights[1] = 10;
+  edge_weights[2] = 1; edge_weights[3] = 10;
+  edge_weights[4] = 1; edge_weights[5] = 10;
+
+  // Construct graph
+  mt_kahypar_context_t* context = mt_kahypar_context_from_preset(DEFAULT);
+  mt_kahypar_hypergraph_t graph =
+    mt_kahypar_create_graph(context, num_nodes, num_edges,
+      edges.get(), edge_weights.get(), node_weights.get(), &error);
+  if (graph.hypergraph == nullptr) {
+    std::cout << error.msg << std::endl; std::exit(1);
+  }
+
+  std::cout << "Number of Nodes = " << mt_kahypar_num_hypernodes(graph) << std::endl;
+  std::cout << "Number of Edges = " << mt_kahypar_num_hyperedges(graph) << std::endl;
+  std::cout << "Total Weight of Graph = " << mt_kahypar_hypergraph_weight(graph) << std::endl;
+
+  // Iterate over the nodes
+  std::vector<mt_kahypar_hyperedge_id_t> incident_edges_buffer;
+  for (mt_kahypar_hypernode_id_t node = 0; node < mt_kahypar_num_hypernodes(graph); ++node) {
+    std::cout << "Node " << node << std::endl;
+    std::cout << "Node Degree = " << mt_kahypar_hypernode_degree(graph, node) << std::endl;
+    std::cout << "Node Weight = " << mt_kahypar_hypernode_weight(graph, node) << std::endl;
+
+    // Get incident edges of the node by writing them to the buffer
+    incident_edges_buffer.resize(mt_kahypar_hypernode_degree(graph, node));
+    mt_kahypar_get_incident_hyperedges(graph, node, incident_edges_buffer.data());
+
+    // Iterate over all neighbors
+    std::cout << "Neighbors: ";
+    for (mt_kahypar_hyperedge_id_t edge: incident_edges_buffer) {
+      mt_kahypar_hypernode_id_t neighbor = mt_kahypar_edge_target(graph, edge);
+      std::cout << neighbor << " ";
+    }
+    std::cout << std::endl;
+  }
+  std::cout << std::endl;
+
+  // Iterate over the edges
+  // Note that the graph has six undirected edges, but internally we represent
+  // the graph as a directed graph. Thus, we iterate over 12 directed edges here.
+  // For a directed edge (u,v), we call u the source and v the target node.
+  for (mt_kahypar_hyperedge_id_t edge = 0; edge < mt_kahypar_num_hyperedges(graph); ++edge) {
+    std::cout << "Edge " << edge << std::endl;
+    std::cout << "Edge Weight = " << mt_kahypar_hyperedge_weight(graph, edge) << std::endl;
+    std::cout << "Source Node = " << mt_kahypar_edge_source(graph, edge) << std::endl;
+    std::cout << "Target Node = " << mt_kahypar_edge_target(graph, edge) << std::endl;
+  }
+  std::cout << std::endl;
+
+  // Create a partition of the graph
+  const mt_kahypar_partition_id_t number_of_blocks = 3;
+  const std::vector<mt_kahypar_partition_id_t> partition = {0, 0, 1, 1, 2};
+  mt_kahypar_partitioned_hypergraph_t partitioned_graph =
+    mt_kahypar_create_partitioned_hypergraph(graph, context, number_of_blocks, partition.data(), &error);
+  if (partitioned_graph.partitioned_hg == nullptr) {
+    std::cout << error.msg << std::endl; std::exit(1);
+  }
+
+  // Iterate over all nodes
+  for (mt_kahypar_hypernode_id_t node = 0; node < mt_kahypar_num_hypernodes(graph); ++node) {
+    std::cout << "Node " << node << std::endl;
+    std::cout << "Block ID = "
+              << mt_kahypar_block_id(partitioned_graph, node) << std::endl;
+    std::cout << "Number of Incident Cut Edges = "
+              << mt_kahypar_num_incident_cut_hyperedges(partitioned_graph, node) << std::endl;
+  }
+  std::cout << std::endl;
+
+  // Iterate over all edges
+  for (mt_kahypar_hyperedge_id_t edge = 0; edge < mt_kahypar_num_hyperedges(graph); ++edge) {
+    std::cout << "Edge " << edge << std::endl;
+    // Print number of blocks connected by edge
+    std::cout << "Connectivity = " << mt_kahypar_connectivity(partitioned_graph, edge) << std::endl;
+  }
+  std::cout << std::endl;
+
+  mt_kahypar_free_context(context);
+  mt_kahypar_free_hypergraph(graph);
+  mt_kahypar_free_partitioned_hypergraph(partitioned_graph);
+}
diff --git a/lib/examples/construct_and_read_hypergraph.cc b/lib/examples/construct_and_read_hypergraph.cc
new file mode 100644
index 000000000..9941a4b20
--- /dev/null
+++ b/lib/examples/construct_and_read_hypergraph.cc
@@ -0,0 +1,136 @@
+#include <iostream>
+#include <memory>
+#include <thread>
+#include <vector>
+
+#include <mtkahypar.h>
+
+// Install library interface via 'sudo make install.mtkahypar' in build folder
+// Compile with: g++ -std=c++14 -DNDEBUG -O3 construct_and_read_hypergraph.cc -o example -lmtkahypar
+int main(int argc, char* argv[]) {
+  mt_kahypar_error_t error{};
+
+  // Initialize thread pool
+  mt_kahypar_initialize(
+    std::thread::hardware_concurrency() /* use all available cores */,
+    true /* activate interleaved NUMA allocation policy */ );
+
+  // In the following, we construct a hypergraph with 7 nodes and 4 hyperedges
+  const mt_kahypar_hypernode_id_t num_nodes = 7;
+  const mt_kahypar_hyperedge_id_t num_hyperedges = 4;
+
+  // The hyperedge indices point into the hyperedge vector and define
+  // the ranges containing the pins of each hyperedge
+  std::unique_ptr<size_t[]> hyperedge_indices = std::make_unique<size_t[]>(5);
+  hyperedge_indices[0] = 0; hyperedge_indices[1] = 2; hyperedge_indices[2] = 6;
+  hyperedge_indices[3] = 9; hyperedge_indices[4] = 12;
+
+  std::unique_ptr<mt_kahypar_hyperedge_id_t[]> hyperedges =
+    std::make_unique<mt_kahypar_hyperedge_id_t[]>(12);
+  // First hyperedge contains nodes with ID 0 and 2
+  hyperedges[0] = 0; hyperedges[1] = 2;
+  // Second hyperedge contains nodes with ID 0, 1, 3 and 4
+  hyperedges[2] = 0; hyperedges[3] = 1; hyperedges[4] = 3; hyperedges[5] = 4;
+  // Third hyperedge contains nodes with ID 3, 4 and 6
+  hyperedges[6] = 3; hyperedges[7] = 4; hyperedges[8] = 6;
+  // Fourth hyperedge contains nodes with ID 2, 5 and 6
+  hyperedges[9] = 2; hyperedges[10] = 5; hyperedges[11] = 6;
+
+  // Define node weights
+  std::unique_ptr<mt_kahypar_hypernode_weight_t[]>
node_weights =
+    std::make_unique<mt_kahypar_hypernode_weight_t[]>(7);
+  node_weights[0] = 2; node_weights[1] = 1; node_weights[2] = 2; node_weights[3] = 4;
+  node_weights[4] = 1; node_weights[5] = 3; node_weights[6] = 3;
+
+  // Define hyperedge weights
+  std::unique_ptr<mt_kahypar_hyperedge_weight_t[]> hyperedge_weights =
+    std::make_unique<mt_kahypar_hyperedge_weight_t[]>(4);
+  hyperedge_weights[0] = 1; hyperedge_weights[1] = 10;
+  hyperedge_weights[2] = 1; hyperedge_weights[3] = 10;
+
+  // Construct hypergraph
+  mt_kahypar_context_t* context = mt_kahypar_context_from_preset(DEFAULT);
+  mt_kahypar_hypergraph_t hypergraph =
+    mt_kahypar_create_hypergraph(context, num_nodes, num_hyperedges,
+      hyperedge_indices.get(), hyperedges.get(),
+      hyperedge_weights.get(), node_weights.get(), &error);
+  if (hypergraph.hypergraph == nullptr) {
+    std::cout << error.msg << std::endl; std::exit(1);
+  }
+
+  std::cout << "Number of Nodes = " << mt_kahypar_num_hypernodes(hypergraph) << std::endl;
+  std::cout << "Number of Hyperedges = " << mt_kahypar_num_hyperedges(hypergraph) << std::endl;
+  std::cout << "Number of Pins = " << mt_kahypar_num_pins(hypergraph) << std::endl;
+  std::cout << "Total Weight of Hypergraph = " << mt_kahypar_hypergraph_weight(hypergraph) << std::endl;
+
+  // Iterate over the nodes
+  std::vector<mt_kahypar_hyperedge_id_t> incident_edges_buffer;
+  for (mt_kahypar_hypernode_id_t node = 0; node < mt_kahypar_num_hypernodes(hypergraph); ++node) {
+    std::cout << "Node " << node << std::endl;
+    std::cout << "Node Degree = " << mt_kahypar_hypernode_degree(hypergraph, node) << std::endl;
+    std::cout << "Node Weight = " << mt_kahypar_hypernode_weight(hypergraph, node) << std::endl;
+
+    // Get incident hyperedges of the node by writing them to the buffer
+    incident_edges_buffer.resize(mt_kahypar_hypernode_degree(hypergraph, node));
+    mt_kahypar_get_incident_hyperedges(hypergraph, node, incident_edges_buffer.data());
+
+    // Iterate over all incident hyperedges
+    std::cout << "Incident Hyperedges: ";
+    for (mt_kahypar_hyperedge_id_t edge: incident_edges_buffer) {
+      std::cout << edge << " ";
+    }
+    std::cout << std::endl;
+  }
+  std::cout << std::endl;
+
+  // Iterate over the hyperedges
+  std::vector<mt_kahypar_hypernode_id_t> pins_buffer;
+  for (mt_kahypar_hyperedge_id_t edge = 0; edge < mt_kahypar_num_hyperedges(hypergraph); ++edge) {
+    std::cout << "Edge " << edge << std::endl;
+    std::cout << "Edge Size = " << mt_kahypar_hyperedge_size(hypergraph, edge) << std::endl;
+    std::cout << "Edge Weight = " << mt_kahypar_hyperedge_weight(hypergraph, edge) << std::endl;
+
+    // Get pins of the hyperedge by writing them to the buffer
+    pins_buffer.resize(mt_kahypar_hyperedge_size(hypergraph, edge));
+    mt_kahypar_get_hyperedge_pins(hypergraph, edge, pins_buffer.data());
+
+    // Iterate over all pins of the hyperedge
+    std::cout << "Pins: ";
+    for (mt_kahypar_hypernode_id_t pin: pins_buffer) {
+      std::cout << pin << " ";
+    }
+    std::cout << std::endl;
+  }
+  std::cout << std::endl;
+
+  // Create a partition of the hypergraph
+  const mt_kahypar_partition_id_t number_of_blocks = 3;
+  const std::vector<mt_kahypar_partition_id_t> partition = {0, 0, 0, 1, 1, 2, 2};
+  mt_kahypar_partitioned_hypergraph_t partitioned_hg =
+    mt_kahypar_create_partitioned_hypergraph(hypergraph, context, number_of_blocks, partition.data(), &error);
+  if (partitioned_hg.partitioned_hg == nullptr) {
+    std::cout << error.msg << std::endl; std::exit(1);
+  }
+
+  // Iterate over all nodes
+  for (mt_kahypar_hypernode_id_t node = 0; node < mt_kahypar_num_hypernodes(hypergraph); ++node) {
+    std::cout << "Node " << node << std::endl;
+    std::cout << "Block ID = "
+              << mt_kahypar_block_id(partitioned_hg, node) << std::endl;
+    std::cout
<< "Number of Incident Cut Edges = " + << mt_kahypar_num_incident_cut_hyperedges(partitioned_hg, node) << std::endl; + } + std::cout << std::endl; + + // Iterate over all edges + for (mt_kahypar_hyperedge_id_t edge = 0; edge < mt_kahypar_num_hyperedges(hypergraph); ++edge) { + std::cout << "Edge " << edge << std::endl; + // Print number of blocks connected by edge + std::cout << "Connectivity = " << mt_kahypar_connectivity(partitioned_hg, edge) << std::endl; + } + std::cout << std::endl; + + mt_kahypar_free_context(context); + mt_kahypar_free_hypergraph(hypergraph); + mt_kahypar_free_partitioned_hypergraph(partitioned_hg); +} diff --git a/lib/examples/construct_graph.cc b/lib/examples/construct_graph.cc deleted file mode 100644 index 6d215911a..000000000 --- a/lib/examples/construct_graph.cc +++ /dev/null @@ -1,62 +0,0 @@ -#include -#include -#include -#include - -#include - -// Install library interface via 'sudo make install.mtkahypar' in build folder -// Compile with: g++ -std=c++14 -DNDEBUG -O3 construct_graph.cc -o example -lmtkahypar -int main(int argc, char* argv[]) { - - // Initialize thread pool - mt_kahypar_initialize( - std::thread::hardware_concurrency() /* use all available cores */, - true /* activate interleaved NUMA allocation policy */ ); - - // In the following, we construct a graph with 5 nodes and 6 edges - const mt_kahypar_hypernode_id_t num_nodes = 5; - const mt_kahypar_hyperedge_id_t num_edges = 6; - - // We represent the edges of the graph as edge list vector. - // Two consecutive node IDs in the edge list vector form - // an undirected edge in the graph. - std::unique_ptr edges = - std::make_unique(12); - // The first undirected edge connects node 0 and 1 - edges[0] = 0; edges[1] = 1; - // The second undirected edge connects node 0 and 2 - edges[2] = 0; edges[3] = 2; - // The third undirected edge connects node 1 and 2 - edges[4] = 1; edges[5] = 2; - // The fourth undirected edge connects node 1 and 3 - edges[6] = 1; edges[7] = 3; - // The fifth undirected edge connects node 2 and 3 - edges[8] = 2; edges[9] = 3; - // The sixth undirected edge connects node 3 and 4 - edges[10] = 3; edges[11] = 4; - - // Define node weights - std::unique_ptr node_weights = - std::make_unique(5); - node_weights[0] = 2; node_weights[1] = 1; node_weights[2] = 2; - node_weights[3] = 4; node_weights[4] = 1; - - // Define edge weights - std::unique_ptr edge_weights = - std::make_unique(6); - edge_weights[0] = 1; edge_weights[1] = 10; - edge_weights[2] = 1; edge_weights[3] = 10; - edge_weights[3] = 1; edge_weights[4] = 10; - - // Construct graph - mt_kahypar_hypergraph_t graph = - mt_kahypar_create_graph(DEFAULT, num_nodes, num_edges, - edges.get(), edge_weights.get(), node_weights.get()); - - std::cout << "Number of Nodes = " << mt_kahypar_num_hypernodes(graph) << std::endl; - std::cout << "Number of Edges = " << mt_kahypar_num_hyperedges(graph) << std::endl; - std::cout << "Total Weight of Graph = " << mt_kahypar_hypergraph_weight(graph) << std::endl; - - mt_kahypar_free_hypergraph(graph); -} \ No newline at end of file diff --git a/lib/examples/construct_hypergraph.cc b/lib/examples/construct_hypergraph.cc deleted file mode 100644 index 9940f4bd8..000000000 --- a/lib/examples/construct_hypergraph.cc +++ /dev/null @@ -1,62 +0,0 @@ -#include -#include -#include -#include - -#include - -// Install library interface via 'sudo make install.mtkahypar' in build folder -// Compile with: g++ -std=c++14 -DNDEBUG -O3 construct_graph.cc -o example -lmtkahypar -int main(int argc, char* argv[]) { - - 
// Initialize thread pool - mt_kahypar_initialize( - std::thread::hardware_concurrency() /* use all available cores */, - true /* activate interleaved NUMA allocation policy */ ); - - // In the following, we construct a hypergraph with 7 nodes and 4 hyperedges - const mt_kahypar_hypernode_id_t num_nodes = 7; - const mt_kahypar_hyperedge_id_t num_hyperedges = 4; - - // The hyperedge indices points to the hyperedge vector and defines the - // the ranges containing the pins of each hyperedge - std::unique_ptr hyperedge_indices = std::make_unique(5); - hyperedge_indices[0] = 0; hyperedge_indices[1] = 2; hyperedge_indices[2] = 6; - hyperedge_indices[3] = 9; hyperedge_indices[4] = 12; - - std::unique_ptr hyperedges = - std::make_unique(12); - // First hyperedge contains nodes with ID 0 and 2 - hyperedges[0] = 0; hyperedges[1] = 2; - // Second hyperedge contains nodes with ID 0, 1, 3 and 4 - hyperedges[2] = 0; hyperedges[3] = 1; hyperedges[4] = 3; hyperedges[5] = 4; - // Third hyperedge contains nodes with ID 3, 4 and 6 - hyperedges[6] = 3; hyperedges[7] = 4; hyperedges[8] = 6; - // Fourth hyperedge contains nodes with ID 2, 5 and 6 - hyperedges[9] = 2; hyperedges[10] = 5; hyperedges[11] = 6; - - // Define node weights - std::unique_ptr node_weights = - std::make_unique(7); - node_weights[0] = 2; node_weights[1] = 1; node_weights[2] = 2; node_weights[3] = 4; - node_weights[4] = 1; node_weights[5] = 3; node_weights[6] = 3; - - // Define hyperedge weights - std::unique_ptr hyperedge_weights = - std::make_unique(4); - hyperedge_weights[0] = 1; hyperedge_weights[1] = 10; - hyperedge_weights[2] = 1; hyperedge_weights[3] = 10; - - // Construct hypergraph for DEFAULT preset - mt_kahypar_hypergraph_t hypergraph = - mt_kahypar_create_hypergraph(DEFAULT, num_nodes, num_hyperedges, - hyperedge_indices.get(), hyperedges.get(), - hyperedge_weights.get(), node_weights.get()); - - std::cout << "Number of Nodes = " << mt_kahypar_num_hypernodes(hypergraph) << std::endl; - std::cout << "Number of Hyperedges = " << mt_kahypar_num_hyperedges(hypergraph) << std::endl; - std::cout << "Number of Pins = " << mt_kahypar_num_pins(hypergraph) << std::endl; - std::cout << "Total Weight of Hypergraph = " << mt_kahypar_hypergraph_weight(hypergraph) << std::endl; - - mt_kahypar_free_hypergraph(hypergraph); -} \ No newline at end of file diff --git a/lib/examples/improve_partition.cc b/lib/examples/improve_partition.cc index 1aab617f8..ad3894421 100644 --- a/lib/examples/improve_partition.cc +++ b/lib/examples/improve_partition.cc @@ -1,3 +1,4 @@ +#include #include #include #include @@ -8,6 +9,7 @@ // Install library interface via 'sudo make install.mtkahypar' in build folder // Compile with: g++ -std=c++14 -DNDEBUG -O3 improve_partition.cc -o example -lmtkahypar int main(int argc, char* argv[]) { + mt_kahypar_error_t error{}; // Initialize thread pool mt_kahypar_initialize( @@ -15,8 +17,7 @@ int main(int argc, char* argv[]) { true /* activate interleaved NUMA allocation policy */ ); // Setup partitioning context - mt_kahypar_context_t* context = mt_kahypar_context_new(); - mt_kahypar_load_preset(context, DEFAULT /* corresponds to MT-KaHyPar-D */); + mt_kahypar_context_t* context = mt_kahypar_context_from_preset(DEFAULT); // In the following, we partition a hypergraph into eight blocks // with an allowed imbalance of 3% and optimize the connective metric (KM1) mt_kahypar_set_partitioning_parameters(context, @@ -24,24 +25,36 @@ int main(int argc, char* argv[]) { KM1 /* objective function */); mt_kahypar_set_seed(42 /* seed */); 
// Enable logging - mt_kahypar_set_context_parameter(context, VERBOSE, "1"); + mt_kahypar_status_t status = + mt_kahypar_set_context_parameter(context, VERBOSE, "1", &error); + assert(status == SUCCESS); // Load Hypergraph for DEFAULT preset mt_kahypar_hypergraph_t hypergraph = mt_kahypar_read_hypergraph_from_file("ibm01.hgr", - DEFAULT, HMETIS /* file format */); + context, HMETIS /* file format */, &error); + if (hypergraph.hypergraph == nullptr) { + std::cout << error.msg << std::endl; std::exit(1); + } // Read Partition File, which we improve with the QUALITY preset + mt_kahypar_context_t* context_quality = mt_kahypar_context_from_preset(QUALITY); + mt_kahypar_set_partitioning_parameters(context_quality, 8, 0.03, KM1); + status = mt_kahypar_set_context_parameter(context_quality, VERBOSE, "1", &error); + assert(status == SUCCESS); + mt_kahypar_partitioned_hypergraph_t partitioned_hg = mt_kahypar_read_partition_from_file( - hypergraph, QUALITY, 8 /* number of blocks */, "ibm01.hgr.part8"); - const double km1_before = mt_kahypar_km1(partitioned_hg); + hypergraph, context_quality, 8 /* number of blocks */, "ibm01.hgr.part8", &error); + if (partitioned_hg.partitioned_hg == nullptr) { + std::cout << error.msg << std::endl; std::exit(1); + } // Improve Partition - mt_kahypar_load_preset(context, QUALITY /* use quality preset for improvement */); - mt_kahypar_improve_partition(partitioned_hg, context, - 1 /* perform one multilevel improvement cycle (also called V-cycle) */); - const double km1_after = mt_kahypar_km1(partitioned_hg); + const int km1_before = mt_kahypar_km1(partitioned_hg); + mt_kahypar_improve_partition(partitioned_hg, context_quality, + 1 /* perform one multilevel improvement cycle (also called V-cycle) */, &error); + const int km1_after = mt_kahypar_km1(partitioned_hg); // Output Results std::cout << "Partitioning Results:" << std::endl; @@ -49,6 +62,7 @@ int main(int argc, char* argv[]) { std::cout << "Km1 after Improvement Cycle = " << km1_after << std::endl; mt_kahypar_free_context(context); + mt_kahypar_free_context(context_quality); mt_kahypar_free_hypergraph(hypergraph); mt_kahypar_free_partitioned_hypergraph(partitioned_hg); -} \ No newline at end of file +} diff --git a/lib/examples/map_hypergraph_onto_target_graph.cc b/lib/examples/map_hypergraph_onto_target_graph.cc index 460276586..5db9c1a87 100644 --- a/lib/examples/map_hypergraph_onto_target_graph.cc +++ b/lib/examples/map_hypergraph_onto_target_graph.cc @@ -1,3 +1,4 @@ +#include #include #include #include @@ -8,6 +9,7 @@ // Install library interface via 'sudo make install.mtkahypar' in build folder // Compile with: g++ -std=c++14 -DNDEBUG -O3 map_hypergraph_onto_target_graph.cc -o example -lmtkahypar int main(int argc, char* argv[]) { + mt_kahypar_error_t error{}; // Initialize thread pool mt_kahypar_initialize( @@ -15,8 +17,7 @@ int main(int argc, char* argv[]) { true /* activate interleaved NUMA allocation policy */ ); // Setup partitioning context - mt_kahypar_context_t* context = mt_kahypar_context_new(); - mt_kahypar_load_preset(context, DEFAULT /* corresponds to MT-KaHyPar-D */); + mt_kahypar_context_t* context = mt_kahypar_context_from_preset(DEFAULT); // In the following, we map a hypergraph into target graph with 8 nodes // with an allowed imbalance of 3% mt_kahypar_set_partitioning_parameters(context, @@ -24,20 +25,31 @@ int main(int argc, char* argv[]) { KM1 /* objective function - not relevant for mapping */); mt_kahypar_set_seed(42 /* seed */); // Enable logging - 
mt_kahypar_set_context_parameter(context, VERBOSE, "1"); + mt_kahypar_status_t status = + mt_kahypar_set_context_parameter(context, VERBOSE, "1", &error); + assert(status == SUCCESS); // Load Hypergraph for DEFAULT preset mt_kahypar_hypergraph_t hypergraph = mt_kahypar_read_hypergraph_from_file("ibm01.hgr", - DEFAULT, HMETIS /* file format */); + context, HMETIS /* file format */, &error); + if (hypergraph.hypergraph == nullptr) { + std::cout << error.msg << std::endl; std::exit(1); + } // Read target graph file in Metis file format mt_kahypar_target_graph_t* target_graph = - mt_kahypar_read_target_graph_from_file("target.graph"); + mt_kahypar_read_target_graph_from_file("target.graph", context, &error); + if (target_graph == nullptr) { + std::cout << error.msg << std::endl; std::exit(1); + } // Map hypergraph onto target graph mt_kahypar_partitioned_hypergraph_t partitioned_hg = - mt_kahypar_map(hypergraph, target_graph, context); + mt_kahypar_map(hypergraph, target_graph, context, &error); + if (partitioned_hg.partitioned_hg == nullptr) { + std::cout << error.msg << std::endl; std::exit(1); + } // Extract Mapping std::unique_ptr mapping = @@ -51,7 +63,7 @@ int main(int argc, char* argv[]) { // Compute Metrics const double imbalance = mt_kahypar_imbalance(partitioned_hg, context); - const double steiner_tree_metric = mt_kahypar_steiner_tree(partitioned_hg, target_graph); + const int steiner_tree_metric = mt_kahypar_steiner_tree(partitioned_hg, target_graph); // Output Results std::cout << "Partitioning Results:" << std::endl; @@ -65,4 +77,4 @@ int main(int argc, char* argv[]) { mt_kahypar_free_hypergraph(hypergraph); mt_kahypar_free_partitioned_hypergraph(partitioned_hg); mt_kahypar_free_target_graph(target_graph); -} \ No newline at end of file +} diff --git a/lib/examples/partition_c_example.c b/lib/examples/partition_c_example.c new file mode 100644 index 000000000..94c005a87 --- /dev/null +++ b/lib/examples/partition_c_example.c @@ -0,0 +1,55 @@ +#include +#include + +#include + +// Install library interface via 'sudo make install.mtkahypar' in build folder +// Compile with: gcc -std=c99 -DNDEBUG -O3 partition_c_example.c -o example -lmtkahypar +int main(int argc, char* argv[]) { + mt_kahypar_error_t error = {0}; + + // Initialize + mt_kahypar_initialize( + 4, /* use 4 threads */ + true /* activate interleaved NUMA allocation policy */ ); + + // Setup partitioning context + mt_kahypar_context_t* context = mt_kahypar_context_from_preset(DEFAULT); + // In the following, we partition a graph into two blocks + // with an allowed imbalance of 3% and optimize the edge cut (CUT) + mt_kahypar_set_partitioning_parameters(context, + 2 /* number of blocks */, 0.03 /* imbalance parameter */, + CUT /* objective function */); + mt_kahypar_set_seed(42 /* seed */); + // Enable logging + mt_kahypar_status_t status = + mt_kahypar_set_context_parameter(context, VERBOSE, "1", &error); + assert(status == SUCCESS); + + // Read Graph + mt_kahypar_hypergraph_t graph = mt_kahypar_read_hypergraph_from_file( + "delaunay_n15.graph", context, METIS /* file format */, &error); + if (graph.hypergraph == NULL) { + printf("%s\n", error.msg); return 1; + } + + // Partition Graph + mt_kahypar_partitioned_hypergraph_t partitioned_graph = + mt_kahypar_partition(graph, context, &error); + if (partitioned_graph.partitioned_hg == NULL) { + printf("%s\n", error.msg); return 1; + } + + // Compute Metrics + const double imbalance = mt_kahypar_imbalance(partitioned_graph, context); + const int cut = 
mt_kahypar_cut(partitioned_graph); + + // Output Results + printf("Partitioning Results:\n"); + printf("Imbalance = %f\n", imbalance); + printf("Cut = %d\n", cut); + + mt_kahypar_free_context(context); + mt_kahypar_free_hypergraph(graph); + mt_kahypar_free_partitioned_hypergraph(partitioned_graph); +} diff --git a/lib/examples/partition_graph.cc b/lib/examples/partition_graph.cc index 6a5a2df13..29156aa66 100644 --- a/lib/examples/partition_graph.cc +++ b/lib/examples/partition_graph.cc @@ -1,3 +1,4 @@ +#include #include #include #include @@ -8,15 +9,15 @@ // Install library interface via 'sudo make install.mtkahypar' in build folder // Compile with: g++ -std=c++14 -DNDEBUG -O3 partition_graph.cc -o example -lmtkahypar int main(int argc, char* argv[]) { + mt_kahypar_error_t error{}; - // Initialize thread pool + // Initialize mt_kahypar_initialize( std::thread::hardware_concurrency() /* use all available cores */, true /* activate interleaved NUMA allocation policy */ ); // Setup partitioning context - mt_kahypar_context_t* context = mt_kahypar_context_new(); - mt_kahypar_load_preset(context, DEFAULT /* corresponds to MT-KaHyPar-D */); + mt_kahypar_context_t* context = mt_kahypar_context_from_preset(DEFAULT); // In the following, we partition a graph into two blocks // with an allowed imbalance of 3% and optimize the edge cut (CUT) mt_kahypar_set_partitioning_parameters(context, @@ -24,16 +25,23 @@ int main(int argc, char* argv[]) { CUT /* objective function */); mt_kahypar_set_seed(42 /* seed */); // Enable logging - mt_kahypar_set_context_parameter(context, VERBOSE, "1"); + mt_kahypar_status_t status = + mt_kahypar_set_context_parameter(context, VERBOSE, "1", &error); + assert(status == SUCCESS); - // Load Hypergraph for DEFAULT preset - mt_kahypar_hypergraph_t graph = - mt_kahypar_read_hypergraph_from_file("delaunay_n15.graph", - DEFAULT, METIS /* file format */); + // Read Graph + mt_kahypar_hypergraph_t graph = mt_kahypar_read_hypergraph_from_file( + "delaunay_n15.graph", context, METIS /* file format */, &error); + if (graph.hypergraph == nullptr) { + std::cout << error.msg << std::endl; std::exit(1); + } - // Partition Hypergraph + // Partition Graph mt_kahypar_partitioned_hypergraph_t partitioned_graph = - mt_kahypar_partition(graph, context); + mt_kahypar_partition(graph, context, &error); + if (partitioned_graph.partitioned_hg == nullptr) { + std::cout << error.msg << std::endl; std::exit(1); + } // Extract Partition std::unique_ptr partition = @@ -47,7 +55,7 @@ int main(int argc, char* argv[]) { // Compute Metrics const double imbalance = mt_kahypar_imbalance(partitioned_graph, context); - const double cut = mt_kahypar_cut(partitioned_graph); + const int cut = mt_kahypar_cut(partitioned_graph); // Output Results std::cout << "Partitioning Results:" << std::endl; @@ -59,4 +67,4 @@ int main(int argc, char* argv[]) { mt_kahypar_free_context(context); mt_kahypar_free_hypergraph(graph); mt_kahypar_free_partitioned_hypergraph(partitioned_graph); -} \ No newline at end of file +} diff --git a/lib/examples/partition_hypergraph.cc b/lib/examples/partition_hypergraph.cc index 84a4afe1f..6064ddc2e 100644 --- a/lib/examples/partition_hypergraph.cc +++ b/lib/examples/partition_hypergraph.cc @@ -1,3 +1,4 @@ +#include #include #include #include @@ -8,15 +9,15 @@ // Install library interface via 'sudo make install.mtkahypar' in build folder // Compile with: g++ -std=c++14 -DNDEBUG -O3 partition_hypergraph.cc -o example -lmtkahypar int main(int argc, char* argv[]) { + mt_kahypar_error_t error{}; 
- // Initialize thread pool + // Initialize mt_kahypar_initialize( std::thread::hardware_concurrency() /* use all available cores */, true /* activate interleaved NUMA allocation policy */ ); // Setup partitioning context - mt_kahypar_context_t* context = mt_kahypar_context_new(); - mt_kahypar_load_preset(context, DEFAULT /* corresponds to MT-KaHyPar-D */); + mt_kahypar_context_t* context = mt_kahypar_context_from_preset(DEFAULT); // In the following, we partition a hypergraph into two blocks // with an allowed imbalance of 3% and optimize the connective metric (KM1) mt_kahypar_set_partitioning_parameters(context, @@ -24,16 +25,24 @@ int main(int argc, char* argv[]) { KM1 /* objective function */); mt_kahypar_set_seed(42 /* seed */); // Enable logging - mt_kahypar_set_context_parameter(context, VERBOSE, "1"); + mt_kahypar_status_t status = + mt_kahypar_set_context_parameter(context, VERBOSE, "1", &error); + assert(status == SUCCESS); // Load Hypergraph for DEFAULT preset mt_kahypar_hypergraph_t hypergraph = mt_kahypar_read_hypergraph_from_file("ibm01.hgr", - DEFAULT, HMETIS /* file format */); + context, HMETIS /* file format */, &error); + if (hypergraph.hypergraph == nullptr) { + std::cout << error.msg << std::endl; std::exit(1); + } // Partition Hypergraph mt_kahypar_partitioned_hypergraph_t partitioned_hg = - mt_kahypar_partition(hypergraph, context); + mt_kahypar_partition(hypergraph, context, &error); + if (partitioned_hg.partitioned_hg == nullptr) { + std::cout << error.msg << std::endl; std::exit(1); + } // Extract Partition std::unique_ptr partition = @@ -47,7 +56,7 @@ int main(int argc, char* argv[]) { // Compute Metrics const double imbalance = mt_kahypar_imbalance(partitioned_hg, context); - const double km1 = mt_kahypar_km1(partitioned_hg); + const int km1 = mt_kahypar_km1(partitioned_hg); // Output Results std::cout << "Partitioning Results:" << std::endl; @@ -59,4 +68,4 @@ int main(int argc, char* argv[]) { mt_kahypar_free_context(context); mt_kahypar_free_hypergraph(hypergraph); mt_kahypar_free_partitioned_hypergraph(partitioned_hg); -} \ No newline at end of file +} diff --git a/lib/examples/partition_with_fixed_vertices.cc b/lib/examples/partition_with_fixed_vertices.cc index a9e964ee5..1586b03a1 100644 --- a/lib/examples/partition_with_fixed_vertices.cc +++ b/lib/examples/partition_with_fixed_vertices.cc @@ -1,3 +1,4 @@ +#include #include #include #include @@ -8,6 +9,7 @@ // Install library interface via 'sudo make install.mtkahypar' in build folder // Compile with: g++ -std=c++14 -DNDEBUG -O3 partition_with_fixed_vertices.cc -o example -lmtkahypar int main(int argc, char* argv[]) { + mt_kahypar_error_t error{}; // Initialize thread pool mt_kahypar_initialize( @@ -15,8 +17,7 @@ int main(int argc, char* argv[]) { true /* activate interleaved NUMA allocation policy */ ); // Setup partitioning context - mt_kahypar_context_t* context = mt_kahypar_context_new(); - mt_kahypar_load_preset(context, DEFAULT /* corresponds to MT-KaHyPar-D */); + mt_kahypar_context_t* context = mt_kahypar_context_from_preset(DEFAULT); // In the following, we partition a hypergraph into four blocks // with an allowed imbalance of 3% and optimize the connective metric (KM1) mt_kahypar_set_partitioning_parameters(context, @@ -24,28 +25,41 @@ int main(int argc, char* argv[]) { KM1 /* objective function */); mt_kahypar_set_seed(42 /* seed */); // Enable logging - mt_kahypar_set_context_parameter(context, VERBOSE, "1"); + mt_kahypar_status_t status = + mt_kahypar_set_context_parameter(context, VERBOSE, 
"1", &error); + assert(status == SUCCESS); // Load Hypergraph for DEFAULT preset mt_kahypar_hypergraph_t hypergraph = mt_kahypar_read_hypergraph_from_file("ibm01.hgr", - DEFAULT, HMETIS /* file format */); + context, HMETIS /* file format */, &error); + if (hypergraph.hypergraph == nullptr) { + std::cout << error.msg << std::endl; std::exit(1); + } // Add fixed vertices from a fixed vertex file std::unique_ptr fixed_vertices = std::make_unique(mt_kahypar_num_hypernodes(hypergraph)); - for ( size_t i = 0; i < 100; ++i ) { - std::cout << fixed_vertices[i] << std::endl; + status = mt_kahypar_read_fixed_vertices_from_file( + "ibm01.k4.p1.fix", mt_kahypar_num_hypernodes(hypergraph), fixed_vertices.get(), &error); + if (status != SUCCESS) { + std::cout << error.msg << std::endl; std::exit(1); + } + status = mt_kahypar_add_fixed_vertices( + hypergraph, fixed_vertices.get(), 4 /* number of blocks */, &error); + if (status != SUCCESS) { + std::cout << error.msg << std::endl; std::exit(1); } - mt_kahypar_read_fixed_vertices_from_file("ibm01.k4.p1.fix", fixed_vertices.get()); - mt_kahypar_add_fixed_vertices(hypergraph, fixed_vertices.get(), 4 /* number of blocks */); // Or simply add the fixed vertices of the file directly to the hypergraph: // mt_kahypar_add_fixed_vertices_from_file( - // hypergraph, "ibm01.k4.p1.fix", 4 /* number of blocks */); + // hypergraph, "ibm01.k4.p1.fix", 4 /* number of blocks */, &error); // Partition Hypergraph mt_kahypar_partitioned_hypergraph_t partitioned_hg = - mt_kahypar_partition(hypergraph, context); + mt_kahypar_partition(hypergraph, context, &error); + if (partitioned_hg.partitioned_hg == nullptr) { + std::cout << error.msg << std::endl; std::exit(1); + } // Extract Partition std::unique_ptr partition = @@ -59,7 +73,7 @@ int main(int argc, char* argv[]) { // Compute Metrics const double imbalance = mt_kahypar_imbalance(partitioned_hg, context); - const double km1 = mt_kahypar_km1(partitioned_hg); + const int km1 = mt_kahypar_km1(partitioned_hg); // Output Results std::cout << "Partitioning Results:" << std::endl; @@ -82,4 +96,4 @@ int main(int argc, char* argv[]) { mt_kahypar_free_context(context); mt_kahypar_free_hypergraph(hypergraph); mt_kahypar_free_partitioned_hypergraph(partitioned_hg); -} \ No newline at end of file +} diff --git a/lib/examples/partition_with_individual_block_weights.cc b/lib/examples/partition_with_individual_block_weights.cc index cee90e9ef..d53c168b4 100644 --- a/lib/examples/partition_with_individual_block_weights.cc +++ b/lib/examples/partition_with_individual_block_weights.cc @@ -1,3 +1,4 @@ +#include #include #include #include @@ -8,6 +9,7 @@ // Install library interface via 'sudo make install.mtkahypar' in build folder // Compile with: g++ -std=c++14 -DNDEBUG -O3 partition_with_individual_block_weights.cc -o example -lmtkahypar int main(int argc, char* argv[]) { + mt_kahypar_error_t error{}; // Initialize thread pool mt_kahypar_initialize( @@ -15,8 +17,7 @@ int main(int argc, char* argv[]) { true /* activate interleaved NUMA allocation policy */ ); // Setup partitioning context - mt_kahypar_context_t* context = mt_kahypar_context_new(); - mt_kahypar_load_preset(context, DEFAULT /* corresponds to MT-KaHyPar-D */); + mt_kahypar_context_t* context = mt_kahypar_context_from_preset(DEFAULT); // In the following, we partition a hypergraph into four blocks // and optimize the connective metric (KM1) mt_kahypar_set_partitioning_parameters(context, @@ -25,29 +26,33 @@ int main(int argc, char* argv[]) { KM1 /* objective function */); 
  mt_kahypar_set_seed(42 /* seed */);
   // Enable logging
-  mt_kahypar_set_context_parameter(context, VERBOSE, "1");
+  mt_kahypar_status_t status =
+    mt_kahypar_set_context_parameter(context, VERBOSE, "1", &error);
+  assert(status == SUCCESS);

   // Setup Individual Block Weights
   std::unique_ptr<mt_kahypar_hypernode_weight_t[]> individual_block_weights =
     std::make_unique<mt_kahypar_hypernode_weight_t[]>(4);
-  // The weight of the first block must be smaller or equal than 2131
-  individual_block_weights[0] = 2131;
-  // The weight of the second block must be smaller or equal than 1213
-  individual_block_weights[1] = 1213;
-  // The weight of the third block must be smaller or equal than 7287
-  individual_block_weights[2] = 7287;
-  // The weight of the fourth block must be smaller or equal than 2501
-  individual_block_weights[3] = 2501;
+  individual_block_weights[0] = 2131; // weight of first block must be <= 2131
+  individual_block_weights[1] = 1213; // weight of second block must be <= 1213
+  individual_block_weights[2] = 7287; // weight of third block must be <= 7287
+  individual_block_weights[3] = 2501; // weight of fourth block must be <= 2501
   mt_kahypar_set_individual_target_block_weights(context, 4, individual_block_weights.get());

   // Load Hypergraph for DEFAULT preset
   mt_kahypar_hypergraph_t hypergraph =
     mt_kahypar_read_hypergraph_from_file("ibm01.hgr",
-      DEFAULT, HMETIS /* file format */);
+      context, HMETIS /* file format */, &error);
+  if (hypergraph.hypergraph == nullptr) {
+    std::cout << error.msg << std::endl; std::exit(1);
+  }

   // Partition Hypergraph
   mt_kahypar_partitioned_hypergraph_t partitioned_hg =
-    mt_kahypar_partition(hypergraph, context);
+    mt_kahypar_partition(hypergraph, context, &error);
+  if (partitioned_hg.partitioned_hg == nullptr) {
+    std::cout << error.msg << std::endl; std::exit(1);
+  }

   // Extract Block Weights
   std::unique_ptr<mt_kahypar_hypernode_weight_t[]> block_weights =
@@ -55,7 +60,7 @@
   mt_kahypar_get_block_weights(partitioned_hg, block_weights.get());

   // Output Results
-  const double km1 = mt_kahypar_km1(partitioned_hg);
+  const int km1 = mt_kahypar_km1(partitioned_hg);
   std::cout << "Partitioning Results:" << std::endl;
   std::cout << "Km1 = " << km1 << std::endl;
   std::cout << "Weight of Block 0 = " << block_weights[0] << " (<= " << individual_block_weights[0] << ")" << std::endl;
@@ -66,4 +71,4 @@
   mt_kahypar_free_context(context);
   mt_kahypar_free_hypergraph(hypergraph);
   mt_kahypar_free_partitioned_hypergraph(partitioned_hg);
-}
\ No newline at end of file
+}
diff --git a/lib/mtkahypar.cpp b/lib/mtkahypar.cpp
index 0cb346dc5..cf7c12112 100644
--- a/lib/mtkahypar.cpp
+++ b/lib/mtkahypar.cpp
@@ -5,6 +5,7 @@
  *
  * Copyright (C) 2020 Lars Gottesbüren
  * Copyright (C) 2020 Tobias Heuer
+ * Copyright (C) 2024 Nikolai Maas
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
@@ -25,29 +26,29 @@
  * SOFTWARE.
******************************************************************************/ +#include +#include +#include +#include + #include "include/mtkahypar.h" #include "include/mtkahypartypes.h" -#include "include/helper_functions.h" +#include "include/lib_generic_impls.h" +#include "include/lib_helper_functions.h" #include "tbb/parallel_for.h" #include "mt-kahypar/definitions.h" #include "mt-kahypar/partition/context.h" -#include "mt-kahypar/partition/partitioner_facade.h" #include "mt-kahypar/partition/metrics.h" #include "mt-kahypar/partition/conversion.h" #include "mt-kahypar/partition/mapping/target_graph.h" -#include "mt-kahypar/partition/registries/registry.h" -#include "mt-kahypar/parallel/tbb_initializer.h" #include "mt-kahypar/parallel/stl/scalable_vector.h" #include "mt-kahypar/io/hypergraph_factory.h" #include "mt-kahypar/io/hypergraph_io.h" -#include "mt-kahypar/io/presets.h" #include "mt-kahypar/macros.h" -#include "mt-kahypar/utils/cast.h" #include "mt-kahypar/utils/delete.h" -#include "mt-kahypar/io/command_line_options.h" - +#include "mt-kahypar/utils/exception.h" using namespace mt_kahypar; @@ -74,13 +75,46 @@ namespace { return PresetType::UNDEFINED; } + mt_kahypar_preset_type_t from_preset_type(PresetType preset) { + switch ( preset ) { + case PresetType::deterministic: return DETERMINISTIC; + case PresetType::large_k: return LARGE_K; + case PresetType::default_preset: return DEFAULT; + case PresetType::quality: return QUALITY; + case PresetType::highest_quality: return HIGHEST_QUALITY; + case PresetType::UNDEFINED: return static_cast(0); + } + return static_cast(0); + } + + mt_kahypar_error_t to_error(mt_kahypar_status_t status, const char* msg) { + mt_kahypar_error_t result; + result.status = status; + size_t msg_len = std::strlen(msg); + char* c_msg = static_cast(malloc(msg_len + 1)); + if (c_msg != nullptr) { + std::strcpy(c_msg, msg); + result.msg = c_msg; + result.msg_len = msg_len; + } + return result; + } + + mt_kahypar_error_t to_error(const std::exception& ex) { + if (dynamic_cast(&ex) != nullptr) { + return to_error(mt_kahypar_status_t::INVALID_INPUT, ex.what()); + } else if (dynamic_cast(&ex) != nullptr) { + return to_error(mt_kahypar_status_t::INVALID_PARAMETER, ex.what()); + } else if (dynamic_cast(&ex) != nullptr) { + return to_error(mt_kahypar_status_t::UNSUPPORTED_OPERATION, ex.what()); + } else if (dynamic_cast(&ex) != nullptr) { + return to_error(mt_kahypar_status_t::SYSTEM_ERROR, ex.what()); + } + return to_error(mt_kahypar_status_t::OTHER_ERROR, ex.what()); + } } -mt_kahypar_context_t* mt_kahypar_context_new() { - return reinterpret_cast(new Context(false)); -} - void mt_kahypar_free_context(mt_kahypar_context_t* context) { if (context == nullptr) { return; @@ -88,60 +122,71 @@ void mt_kahypar_free_context(mt_kahypar_context_t* context) { delete reinterpret_cast(context); } -void mt_kahypar_configure_context_from_file(mt_kahypar_context_t* kahypar_context, - const char* ini_file_name) { +mt_kahypar_context_t* mt_kahypar_context_from_file(const char* ini_file_name, + mt_kahypar_error_t* error) { try { - parseIniToContext(*reinterpret_cast(kahypar_context), ini_file_name); + Context* context = new Context(lib::context_from_file(ini_file_name)); + return reinterpret_cast(context); } catch ( std::exception& ex ) { - LOG << ex.what(); + *error = to_error(ex); + return nullptr; } } -void mt_kahypar_load_preset(mt_kahypar_context_t* context, - const mt_kahypar_preset_type_t preset) { - Context& c = *reinterpret_cast(context); - PresetType preset_type = 
-
-  if ( preset_type != PresetType::UNDEFINED ) {
-    auto preset_option_list = loadPreset(preset_type);
-    presetToContext(c, preset_option_list);
-  }
+mt_kahypar_context_t* mt_kahypar_context_from_preset(const mt_kahypar_preset_type_t preset) {
+  Context* context = new Context(lib::context_from_preset(to_preset_type(preset)));
+  return reinterpret_cast<mt_kahypar_context_t*>(context);
 }
 
-int mt_kahypar_set_context_parameter(mt_kahypar_context_t* context,
-                                     const mt_kahypar_context_parameter_type_t type,
-                                     const char* value) {
+mt_kahypar_status_t mt_kahypar_set_context_parameter(mt_kahypar_context_t* context,
+                                                     const mt_kahypar_context_parameter_type_t type,
+                                                     const char* value,
+                                                     mt_kahypar_error_t* error) {
+  auto report_conversion_error = [&](const char* expected) {
+    std::string msg = std::string("Invalid parameter value \"") + value + "\", expected: " + expected;
+    *error = to_error(mt_kahypar_status_t::INVALID_PARAMETER, msg.c_str());
+  };
+  auto parse_number = [&](auto& context_parameter, const char* expected) {
+    std::errc errc = std::from_chars(value, value + std::strlen(value), context_parameter).ec;
+    if (errc == std::errc{}) {
+      return mt_kahypar_status_t::SUCCESS;
+    }
+    report_conversion_error(expected);
+    return mt_kahypar_status_t::INVALID_PARAMETER;
+  };
+
   Context& c = *reinterpret_cast<Context*>(context);
   switch(type) {
-    case NUM_BLOCKS:
-      c.partition.k = atoi(value);
-      if ( c.partition.k > 0 ) return 0; /** success **/
-      else return 2; /** integer conversion error **/
-    case EPSILON:
-      c.partition.epsilon = atof(value);
-      return 0;
-    case OBJECTIVE:
-      {
-        std::string objective(value);
-        if ( objective == "km1" ) {
-          c.partition.objective = Objective::km1;
-          return 0;
-        } else if ( objective == "cut" ) {
-          c.partition.objective = Objective::cut;
-          return 0;
-        } else if ( objective == "soed" ) {
-          c.partition.objective = Objective::soed;
-        }
-        return 3;
+    case NUM_BLOCKS: return parse_number(c.partition.k, "positive integer");
+    case EPSILON: return parse_number(c.partition.epsilon, "floating point number");
+    case NUM_VCYCLES: return parse_number(c.partition.num_vcycles, "positive integer");
+    case OBJECTIVE: {
+      std::string objective(value);
+      if ( objective == "km1" ) {
+        c.partition.objective = Objective::km1;
+        return mt_kahypar_status_t::SUCCESS;
+      } else if ( objective == "cut" ) {
+        c.partition.objective = Objective::cut;
+        return mt_kahypar_status_t::SUCCESS;
+      } else if ( objective == "soed" ) {
+        c.partition.objective = Objective::soed;
+        return mt_kahypar_status_t::SUCCESS;
       }
-    case NUM_VCYCLES:
-      c.partition.num_vcycles = atoi(value);
-      return 0;
+      report_conversion_error("one of km1, cut, soed");
+      return mt_kahypar_status_t::INVALID_PARAMETER;
+    }
     case VERBOSE:
-      c.partition.verbose_output = atoi(value);
-      return 0;
+      try {
+        c.partition.verbose_output = boost::lexical_cast<bool>(value);
+        return mt_kahypar_status_t::SUCCESS;
+      } catch ( boost::bad_lexical_cast& ) {
+        report_conversion_error("boolean");
+        return mt_kahypar_status_t::INVALID_PARAMETER;
+      }
   }
-  return 1; /** no valid parameter type **/
+  *error = to_error(mt_kahypar_status_t::INVALID_PARAMETER,
+    "Type must be a valid value of mt_kahypar_context_parameter_type_t");
+  return mt_kahypar_status_t::INVALID_PARAMETER;
 }
 
 void mt_kahypar_set_partitioning_parameters(mt_kahypar_context_t* context,
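In contrast to the old integer return codes (`0`/`1`/`2`/`3`), a failed conversion now yields both a status and a human-readable message. A short sketch of the resulting calling convention, assuming a `context` created via one of the functions above (the bogus value is purely illustrative):

```cpp
mt_kahypar_error_t error{};
mt_kahypar_status_t status =
  mt_kahypar_set_context_parameter(context, NUM_BLOCKS, "eight", &error);
if (status != SUCCESS) {
  // e.g.: Invalid parameter value "eight", expected: positive integer
  std::cerr << error.msg << std::endl;
  mt_kahypar_free_error_content(&error);
}
```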
@@ -161,6 +206,34 @@ void mt_kahypar_set_partitioning_parameters(mt_kahypar_context_t* context,
   }
 }
 
+mt_kahypar_preset_type_t mt_kahypar_get_preset(const mt_kahypar_context_t* context) {
+  return from_preset_type(reinterpret_cast<const Context*>(context)->partition.preset_type);
+}
+
+mt_kahypar_partition_id_t mt_kahypar_get_num_blocks(const mt_kahypar_context_t* context) {
+  return reinterpret_cast<const Context*>(context)->partition.k;
+}
+
+double mt_kahypar_get_epsilon(const mt_kahypar_context_t* context) {
+  return reinterpret_cast<const Context*>(context)->partition.epsilon;
+}
+
+mt_kahypar_objective_t mt_kahypar_get_objective(const mt_kahypar_context_t* context) {
+  switch ( reinterpret_cast<const Context*>(context)->partition.objective ) {
+    case Objective::cut:
+      return CUT;
+    case Objective::km1:
+      return KM1;
+    case Objective::soed:
+      return SOED;
+    case Objective::steiner_tree:
+    case Objective::UNDEFINED:
+      return static_cast<mt_kahypar_objective_t>(0);
+    // omit default case to trigger compiler warning for missing cases
+  }
+  return static_cast<mt_kahypar_objective_t>(0);
+}
+
 void mt_kahypar_set_seed(const size_t seed) {
   utils::Randomize::instance().setSeed(seed);
 }
@@ -168,79 +241,59 @@ void mt_kahypar_set_seed(const size_t seed) {
 void mt_kahypar_set_individual_target_block_weights(mt_kahypar_context_t* context,
                                                     const mt_kahypar_partition_id_t num_blocks,
                                                     const mt_kahypar_hypernode_weight_t* block_weights) {
-  Context& c = *reinterpret_cast<Context*>(context);
-  c.partition.use_individual_part_weights = true;
-  c.partition.max_part_weights.assign(num_blocks, 0);
-  for ( mt_kahypar_partition_id_t i = 0; i < num_blocks; ++i ) {
-    c.partition.max_part_weights[i] = block_weights[i];
-  }
+  lib::set_individual_block_weights(reinterpret_cast<Context&>(*context), num_blocks, block_weights);
 }
 
 void mt_kahypar_initialize(const size_t num_threads, const bool interleaved_allocations) {
-  size_t P = num_threads;
-  #ifndef KAHYPAR_DISABLE_HWLOC
-  size_t num_available_cpus = HardwareTopology::instance().num_cpus();
-  if ( num_available_cpus < num_threads ) {
-    WARNING("There are currently only" << num_available_cpus << "cpus available."
-      << "Setting number of threads from" << num_threads
-      << "to" << num_available_cpus);
-    P = num_available_cpus;
-  }
-  #endif
-
-  // Initialize TBB task arenas on numa nodes
-  TBBInitializer::instance(P);
-
-  #ifndef KAHYPAR_DISABLE_HWLOC
-  if ( interleaved_allocations ) {
-    // We set the membind policy to interleaved allocations in order to
-    // distribute allocations evenly across NUMA nodes
-    hwloc_cpuset_t cpuset = TBBInitializer::instance().used_cpuset();
-    parallel::HardwareTopology<>::instance().activate_interleaved_membind_policy(cpuset);
-    hwloc_bitmap_free(cpuset);
-  }
-  #else
-  unused(interleaved_allocations);
-  #endif
+  lib::initialize(num_threads, interleaved_allocations, false);
+}
 
-  register_algorithms_and_policies();
+void mt_kahypar_free_error_content(mt_kahypar_error_t* error) {
+  free(const_cast<char*>(error->msg));
+  error->status = mt_kahypar_status_t::SUCCESS;
+  error->msg = nullptr;
+  error->msg_len = 0;
 }
 
 mt_kahypar_hypergraph_t mt_kahypar_read_hypergraph_from_file(const char* file_name,
-                                                             const mt_kahypar_preset_type_t preset,
-                                                             const mt_kahypar_file_format_type_t file_format) {
-  const PresetType config = to_preset_type(preset);
+                                                             const mt_kahypar_context_t* context,
+                                                             const mt_kahypar_file_format_type_t file_format,
+                                                             mt_kahypar_error_t* error) {
+  const Context& c = *reinterpret_cast<const Context*>(context);
   const InstanceType instance = file_format == HMETIS ?
     InstanceType::hypergraph : InstanceType::graph;
   const FileFormat format = file_format == HMETIS ?
     FileFormat::hMetis : FileFormat::Metis;
-  const bool stable_construction = preset == DETERMINISTIC ? true : false;
   try {
-    return io::readInputFile(file_name, config, instance, format, stable_construction);
+    return lib::hypergraph_from_file(file_name, c, instance, format);
   } catch ( std::exception& ex ) {
-    LOG << ex.what();
+    *error = to_error(ex);
  }
   return mt_kahypar_hypergraph_t { nullptr, NULLPTR_HYPERGRAPH };
 }
 
-mt_kahypar_target_graph_t* mt_kahypar_read_target_graph_from_file(const char* file_name) {
+mt_kahypar_target_graph_t* mt_kahypar_read_target_graph_from_file(const char* file_name,
+                                                                  const mt_kahypar_context_t* context,
+                                                                  mt_kahypar_error_t* error) {
+  unused(context);
   TargetGraph* target_graph = nullptr;
   try {
     ds::StaticGraph graph = io::readInputFile<ds::StaticGraph>(file_name, FileFormat::Metis, true);
     target_graph = new TargetGraph(std::move(graph));
   } catch ( std::exception& ex ) {
-    LOG << ex.what();
+    *error = to_error(ex);
   }
   return reinterpret_cast<mt_kahypar_target_graph_t*>(target_graph);
 }
 
-mt_kahypar_hypergraph_t mt_kahypar_create_hypergraph(const mt_kahypar_preset_type_t preset,
+mt_kahypar_hypergraph_t mt_kahypar_create_hypergraph(const mt_kahypar_context_t* context,
                                                      const mt_kahypar_hypernode_id_t num_vertices,
                                                      const mt_kahypar_hyperedge_id_t num_hyperedges,
                                                      const size_t* hyperedge_indices,
                                                      const mt_kahypar_hyperedge_id_t* hyperedges,
                                                      const mt_kahypar_hyperedge_weight_t* hyperedge_weights,
-                                                     const mt_kahypar_hypernode_weight_t* vertex_weights) {
-  // Transform adjacence array into adjacence list
+                                                     const mt_kahypar_hypernode_weight_t* vertex_weights,
+                                                     mt_kahypar_error_t* error) {
+  // Transform adjacency array into adjacency list
   vec<vec<HypernodeID>> edge_vector(num_hyperedges);
   tbb::parallel_for(0, num_hyperedges, [&](const mt_kahypar::HyperedgeID& he) {
     const size_t num_pins = hyperedge_indices[he + 1] - hyperedge_indices[he];
@@ -250,67 +303,45 @@ mt_kahypar_hypergraph_t mt_kahypar_create_hypergraph(const mt_kahypar_preset_typ
     }
   });
 
+  const Context& c = *reinterpret_cast<const Context*>(context);
   try {
-    switch ( preset ) {
-      case DETERMINISTIC:
-      case LARGE_K:
-      case DEFAULT:
-      case QUALITY:
-        return mt_kahypar_hypergraph_t {
-          reinterpret_cast<mt_kahypar_hypergraph_s*>(new ds::StaticHypergraph(
-            StaticHypergraphFactory::construct(num_vertices, num_hyperedges,
-              edge_vector, hyperedge_weights, vertex_weights, preset == DETERMINISTIC))), STATIC_HYPERGRAPH };
-      case HIGHEST_QUALITY:
-        return mt_kahypar_hypergraph_t {
-          reinterpret_cast<mt_kahypar_hypergraph_s*>(new ds::DynamicHypergraph(
-            DynamicHypergraphFactory::construct(num_vertices, num_hyperedges,
-              edge_vector, hyperedge_weights, vertex_weights, false))), DYNAMIC_HYPERGRAPH };
-    }
+    return lib::create_hypergraph(c, num_vertices, num_hyperedges, edge_vector, hyperedge_weights, vertex_weights);
   } catch ( std::exception& ex ) {
-    LOG << ex.what();
+    *error = to_error(ex);
   }
   return mt_kahypar_hypergraph_t { nullptr, NULLPTR_HYPERGRAPH };
 }
 
-mt_kahypar_hypergraph_t mt_kahypar_create_graph(const mt_kahypar_preset_type_t preset,
+mt_kahypar_hypergraph_t mt_kahypar_create_graph(const mt_kahypar_context_t* context,
                                                 const mt_kahypar_hypernode_id_t num_vertices,
                                                 const mt_kahypar_hyperedge_id_t num_edges,
                                                 const mt_kahypar_hypernode_id_t* edges,
                                                 const mt_kahypar_hyperedge_weight_t* edge_weights,
-                                                const mt_kahypar_hypernode_weight_t* vertex_weights) {
+                                                const mt_kahypar_hypernode_weight_t* vertex_weights,
+                                                mt_kahypar_error_t* error) {
   // Transform adjacence array into adjacence list
   vec<std::pair<HypernodeID, HypernodeID>> edge_vector(num_edges);
   tbb::parallel_for(0, num_edges, [&](const mt_kahypar::HyperedgeID& he) {
     edge_vector[he] = std::make_pair(edges[2*he], edges[2*he + 1]);
   });
 
+  const Context& c = *reinterpret_cast<const Context*>(context);
   try {
-    switch ( preset ) {
-      case DETERMINISTIC:
-      case LARGE_K:
-      case DEFAULT:
-      case QUALITY:
-        return mt_kahypar_hypergraph_t {
-          reinterpret_cast<mt_kahypar_hypergraph_s*>(new ds::StaticGraph(
-            StaticGraphFactory::construct_from_graph_edges(num_vertices, num_edges,
-              edge_vector, edge_weights, vertex_weights, preset == DETERMINISTIC))), STATIC_GRAPH };
-      case HIGHEST_QUALITY:
-        return mt_kahypar_hypergraph_t {
-          reinterpret_cast<mt_kahypar_hypergraph_s*>(new ds::DynamicGraph(
-            DynamicGraphFactory::construct_from_graph_edges(num_vertices, num_edges,
-              edge_vector, edge_weights, vertex_weights, false))), DYNAMIC_GRAPH };
-    }
+    return lib::create_graph(c, num_vertices, num_edges, edge_vector, edge_weights, vertex_weights);
   } catch ( std::exception& ex ) {
-    LOG << ex.what();
+    *error = to_error(ex);
   }
   return mt_kahypar_hypergraph_t { nullptr, NULLPTR_HYPERGRAPH };
 }
 
-mt_kahypar_target_graph_t* mt_kahypar_create_target_graph(const mt_kahypar_hypernode_id_t num_vertices,
+mt_kahypar_target_graph_t* mt_kahypar_create_target_graph(const mt_kahypar_context_t* context,
+                                                          const mt_kahypar_hypernode_id_t num_vertices,
                                                           const mt_kahypar_hyperedge_id_t num_edges,
                                                           const mt_kahypar_hypernode_id_t* edges,
-                                                          const mt_kahypar_hyperedge_weight_t* edge_weights) {
-  // Transform adjacence array into adjacence list
+                                                          const mt_kahypar_hyperedge_weight_t* edge_weights,
+                                                          mt_kahypar_error_t* error) {
+  unused(context);
+  // Transform adjacency array into adjacency list
   vec<std::pair<HypernodeID, HypernodeID>> edge_vector(num_edges);
   tbb::parallel_for(0, num_edges, [&](const mt_kahypar::HyperedgeID& he) {
     edge_vector[he] = std::make_pair(edges[2*he], edges[2*he + 1]);
@@ -322,7 +353,7 @@ mt_kahypar_target_graph_t* mt_kahypar_create_target_graph(const mt_kahypar_hyper
       num_vertices, num_edges, edge_vector, edge_weights, nullptr, true);
     target_graph = new TargetGraph(std::move(graph));
   } catch ( std::exception& ex ) {
-    LOG << ex.what();
+    *error = to_error(ex);
   }
   return reinterpret_cast<mt_kahypar_target_graph_t*>(target_graph);
 }
@@ -338,81 +369,120 @@ void mt_kahypar_free_target_graph(mt_kahypar_target_graph_t* target_graph) {
   }
 }
 
+void mt_kahypar_free_partitioned_hypergraph(mt_kahypar_partitioned_hypergraph_t partitioned_hg) {
+  utils::delete_partitioned_hypergraph(partitioned_hg);
+}
+
 mt_kahypar_hypernode_id_t mt_kahypar_num_hypernodes(mt_kahypar_hypergraph_t hypergraph) {
-  switch ( hypergraph.type ) {
-    case STATIC_GRAPH: return utils::cast<ds::StaticGraph>(hypergraph).initialNumNodes();
-    case DYNAMIC_GRAPH: return utils::cast<ds::DynamicGraph>(hypergraph).initialNumNodes();
-    case STATIC_HYPERGRAPH: return utils::cast<ds::StaticHypergraph>(hypergraph).initialNumNodes();
-    case DYNAMIC_HYPERGRAPH: return utils::cast<ds::DynamicHypergraph>(hypergraph).initialNumNodes();
-    case NULLPTR_HYPERGRAPH: return 0;
-  }
-  return 0;
+  return lib::num_nodes(hypergraph);
 }
 
 mt_kahypar_hyperedge_id_t mt_kahypar_num_hyperedges(mt_kahypar_hypergraph_t hypergraph) {
-  switch ( hypergraph.type ) {
-    case STATIC_GRAPH: return utils::cast<ds::StaticGraph>(hypergraph).initialNumEdges() / 2;
-    case DYNAMIC_GRAPH: return utils::cast<ds::DynamicGraph>(hypergraph).initialNumEdges() / 2;
-    case STATIC_HYPERGRAPH: return utils::cast<ds::StaticHypergraph>(hypergraph).initialNumEdges();
-    case DYNAMIC_HYPERGRAPH: return utils::cast<ds::DynamicHypergraph>(hypergraph).initialNumEdges();
-    case NULLPTR_HYPERGRAPH: return 0;
-  }
-  return 0;
+  return lib::num_edges(hypergraph);
 }
 
 mt_kahypar_hypernode_id_t mt_kahypar_num_pins(mt_kahypar_hypergraph_t hypergraph) {
-  switch ( hypergraph.type ) {
-    case STATIC_GRAPH: return utils::cast<ds::StaticGraph>(hypergraph).initialNumPins();
-    case DYNAMIC_GRAPH: return utils::cast<ds::DynamicGraph>(hypergraph).initialNumPins();
-    case STATIC_HYPERGRAPH: return utils::cast<ds::StaticHypergraph>(hypergraph).initialNumPins();
-    case DYNAMIC_HYPERGRAPH: return utils::cast<ds::DynamicHypergraph>(hypergraph).initialNumPins();
-    case NULLPTR_HYPERGRAPH: return 0;
-  }
-  return 0;
+  return lib::num_pins(hypergraph);
 }
 
 mt_kahypar_hypernode_id_t mt_kahypar_hypergraph_weight(mt_kahypar_hypergraph_t hypergraph) {
-  switch ( hypergraph.type ) {
-    case STATIC_GRAPH: return utils::cast<ds::StaticGraph>(hypergraph).totalWeight();
-    case DYNAMIC_GRAPH: return utils::cast<ds::DynamicGraph>(hypergraph).totalWeight();
-    case STATIC_HYPERGRAPH: return utils::cast<ds::StaticHypergraph>(hypergraph).totalWeight();
-    case DYNAMIC_HYPERGRAPH: return utils::cast<ds::DynamicHypergraph>(hypergraph).totalWeight();
-    case NULLPTR_HYPERGRAPH: return 0;
-  }
-  return 0;
+  return lib::total_weight(hypergraph);
 }
 
-void mt_kahypar_free_partitioned_hypergraph(mt_kahypar_partitioned_hypergraph_t partitioned_hg) {
-  utils::delete_partitioned_hypergraph(partitioned_hg);
+mt_kahypar_hyperedge_id_t mt_kahypar_hypernode_degree(mt_kahypar_hypergraph_t hypergraph, mt_kahypar_hypernode_id_t node) {
+  return lib::node_degree(hypergraph, node);
+}
+
+mt_kahypar_hypernode_weight_t mt_kahypar_hypernode_weight(mt_kahypar_hypergraph_t hypergraph, mt_kahypar_hypernode_id_t node) {
+  return lib::node_weight(hypergraph, node);
+}
+
+mt_kahypar_hypernode_id_t mt_kahypar_hyperedge_size(mt_kahypar_hypergraph_t hypergraph, mt_kahypar_hyperedge_id_t edge) {
+  return lib::edge_size(hypergraph, edge);
+}
+
+mt_kahypar_hyperedge_weight_t mt_kahypar_hyperedge_weight(mt_kahypar_hypergraph_t hypergraph, mt_kahypar_hyperedge_id_t edge) {
+  return lib::edge_weight(hypergraph, edge);
+}
+
+mt_kahypar_hyperedge_id_t mt_kahypar_get_incident_hyperedges(mt_kahypar_hypergraph_t hypergraph,
+                                                             mt_kahypar_hypernode_id_t node,
+                                                             mt_kahypar_hyperedge_id_t* edge_buffer) {
+  return lib::switch_hg(hypergraph, [&](auto& hg) {
+    HyperedgeID i = 0;
+    for (HyperedgeID he: hg.incidentEdges(node)) {
+      edge_buffer[i++] = he;
+    }
+    return i;
+  });
+}
+
+mt_kahypar_hypernode_id_t mt_kahypar_get_hyperedge_pins(mt_kahypar_hypergraph_t hypergraph,
+                                                        mt_kahypar_hyperedge_id_t edge,
+                                                        mt_kahypar_hypernode_id_t* pin_buffer) {
+  return lib::switch_hg(hypergraph, [&](auto& hg) {
+    HypernodeID i = 0;
+    for (HypernodeID hn: hg.pins(edge)) {
+      pin_buffer[i++] = hn;
+    }
+    return i;
+  });
+}
+
+bool mt_kahypar_is_graph(mt_kahypar_hypergraph_t hypergraph) {
+  return hypergraph.type == STATIC_GRAPH || hypergraph.type == DYNAMIC_GRAPH;
+}
+
+mt_kahypar_hypernode_id_t mt_kahypar_edge_source(mt_kahypar_hypergraph_t graph, mt_kahypar_hyperedge_id_t edge) {
+  return lib::switch_graph(graph, [=](auto& hg) {
+    return hg.edgeSource(edge);
+  });
+}
+
+mt_kahypar_hypernode_id_t mt_kahypar_edge_target(mt_kahypar_hypergraph_t graph, mt_kahypar_hyperedge_id_t edge) {
+  return lib::switch_graph(graph, [=](auto& hg) {
+    return hg.edgeTarget(edge);
+  });
 }
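The new query functions write into caller-provided buffers and return the number of elements written; sizing the buffer via `mt_kahypar_hypernode_degree` (or `mt_kahypar_hyperedge_size` for pins) is an inference from these signatures rather than documented behavior. A sketch, with node `0` as a placeholder:

```cpp
std::vector<mt_kahypar_hyperedge_id_t> buffer(
  mt_kahypar_hypernode_degree(hypergraph, 0));
mt_kahypar_hyperedge_id_t count =
  mt_kahypar_get_incident_hyperedges(hypergraph, 0, buffer.data());
// buffer[0..count) now holds the hyperedges incident to node 0
```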
 
-void mt_kahypar_add_fixed_vertices(mt_kahypar_hypergraph_t hypergraph,
-                                   mt_kahypar_partition_id_t* fixed_vertices,
-                                   mt_kahypar_partition_id_t num_blocks) {
+
+mt_kahypar_status_t mt_kahypar_add_fixed_vertices(mt_kahypar_hypergraph_t hypergraph,
+                                                  const mt_kahypar_partition_id_t* fixed_vertices,
+                                                  mt_kahypar_partition_id_t num_blocks,
+                                                  mt_kahypar_error_t* error) {
   try {
     io::addFixedVertices(hypergraph, fixed_vertices, num_blocks);
+    return mt_kahypar_status_t::SUCCESS;
   } catch ( std::exception& ex ) {
-    LOG << ex.what();
+    *error = to_error(ex);
+    return error->status;
   }
 }
 
-void mt_kahypar_read_fixed_vertices_from_file(const char* file_name,
-                                              mt_kahypar_partition_id_t* fixed_vertices) {
+mt_kahypar_status_t mt_kahypar_read_fixed_vertices_from_file(const char* file_name,
+                                                             mt_kahypar_hypernode_id_t num_nodes,
+                                                             mt_kahypar_partition_id_t* fixed_vertices,
+                                                             mt_kahypar_error_t* error) {
   try {
-    io::readPartitionFile(file_name, fixed_vertices);
+    io::readPartitionFile(file_name, num_nodes, fixed_vertices);
+    return mt_kahypar_status_t::SUCCESS;
   } catch ( std::exception& ex ) {
-    LOG << ex.what();
+    *error = to_error(ex);
+    return error->status;
   }
 }
 
-void mt_kahypar_add_fixed_vertices_from_file(mt_kahypar_hypergraph_t hypergraph,
-                                             const char* file_name,
-                                             mt_kahypar_partition_id_t num_blocks) {
+mt_kahypar_status_t mt_kahypar_add_fixed_vertices_from_file(mt_kahypar_hypergraph_t hypergraph,
+                                                            const char* file_name,
+                                                            mt_kahypar_partition_id_t num_blocks,
+                                                            mt_kahypar_error_t* error) {
   try {
     io::addFixedVerticesFromFile(hypergraph, file_name, num_blocks);
+    return mt_kahypar_status_t::SUCCESS;
   } catch ( std::exception& ex ) {
-    LOG << ex.what();
+    *error = to_error(ex);
+    return error->status;
   }
 }
 
@@ -420,279 +490,174 @@ void mt_kahypar_remove_fixed_vertices(mt_kahypar_hypergraph_t hypergraph) {
   io::removeFixedVertices(hypergraph);
 }
 
+bool mt_kahypar_is_fixed_vertex(mt_kahypar_hypergraph_t hypergraph, mt_kahypar_hypernode_id_t node) {
+  return lib::is_fixed(hypergraph, node);
+}
+
+mt_kahypar_partition_id_t mt_kahypar_fixed_vertex_block(mt_kahypar_hypergraph_t hypergraph, mt_kahypar_hypernode_id_t node) {
+  return lib::fixed_vertex_block(hypergraph, node);
+}
+
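Fixed-vertex assignment now also returns a status. A hedged sketch of loading fixed vertices from a file and inspecting a node afterwards, assuming an `error` declared as before (`fixed.part` and the block count are placeholders):

```cpp
if (mt_kahypar_add_fixed_vertices_from_file(hypergraph, "fixed.part", 2, &error) != SUCCESS) {
  std::cerr << error.msg << std::endl;
  mt_kahypar_free_error_content(&error);
}
if (mt_kahypar_is_fixed_vertex(hypergraph, 0)) {
  std::cout << "node 0 is fixed to block "
            << mt_kahypar_fixed_vertex_block(hypergraph, 0) << std::endl;
}
```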
 bool mt_kahypar_check_compatibility(mt_kahypar_hypergraph_t hypergraph,
-                                    mt_kahypar_preset_type_t preset) {
-  return lib::check_compatibility(hypergraph, preset);
+                                    mt_kahypar_preset_type_t preset) {
+  return lib::is_compatible(hypergraph, preset);
 }
 
 mt_kahypar_partitioned_hypergraph_t mt_kahypar_partition(mt_kahypar_hypergraph_t hypergraph,
-                                                         mt_kahypar_context_t* context) {
-  Context& c = *reinterpret_cast<Context*>(context);
-  if ( lib::check_if_all_relavant_parameters_are_set(c) ) {
-    if ( mt_kahypar_check_compatibility(hypergraph, lib::get_preset_c_type(c.partition.preset_type)) ) {
-      c.partition.instance_type = lib::get_instance_type(hypergraph);
-      c.partition.partition_type = to_partition_c_type(
-        c.partition.preset_type, c.partition.instance_type);
-      lib::prepare_context(c);
-      c.partition.num_vcycles = 0;
-      try {
-        return PartitionerFacade::partition(hypergraph, c);
-      } catch ( std::exception& ex ) {
-        LOG << ex.what();
-      }
-    } else {
-      WARNING(lib::incompatibility_description(hypergraph));
-    }
+                                                         const mt_kahypar_context_t* context,
+                                                         mt_kahypar_error_t* error) {
+  try {
+    return lib::partition(hypergraph, reinterpret_cast<const Context&>(*context));
+  } catch ( std::exception& ex ) {
+    *error = to_error(ex);
   }
   return mt_kahypar_partitioned_hypergraph_t { nullptr, NULLPTR_PARTITION };
 }
 
 mt_kahypar_partitioned_hypergraph_t mt_kahypar_map(mt_kahypar_hypergraph_t hypergraph,
                                                    mt_kahypar_target_graph_t* target_graph,
-                                                   mt_kahypar_context_t* context) {
-  Context& c = *reinterpret_cast<Context*>(context);
-  if ( lib::check_if_all_relavant_parameters_are_set(c) ) {
-    if ( mt_kahypar_check_compatibility(hypergraph, lib::get_preset_c_type(c.partition.preset_type)) ) {
-      c.partition.instance_type = lib::get_instance_type(hypergraph);
-      c.partition.partition_type = to_partition_c_type(
-        c.partition.preset_type, c.partition.instance_type);
-      lib::prepare_context(c);
-      c.partition.num_vcycles = 0;
-      c.partition.objective = Objective::steiner_tree;
-      TargetGraph* target = reinterpret_cast<TargetGraph*>(target_graph);
-      try {
-        return PartitionerFacade::partition(hypergraph, c, target);
-      } catch ( std::exception& ex ) {
-        LOG << ex.what();
-      }
-    } else {
-      WARNING(lib::incompatibility_description(hypergraph));
-    }
+                                                   const mt_kahypar_context_t* context,
+                                                   mt_kahypar_error_t* error) {
+  try {
+    return lib::map(hypergraph,
+                    reinterpret_cast<TargetGraph&>(*target_graph),
+                    reinterpret_cast<const Context&>(*context));
+  } catch ( std::exception& ex ) {
+    *error = to_error(ex);
   }
   return mt_kahypar_partitioned_hypergraph_t { nullptr, NULLPTR_PARTITION };
 }
 
 MT_KAHYPAR_API bool mt_kahypar_check_partition_compatibility(mt_kahypar_partitioned_hypergraph_t partitioned_hg,
                                                              mt_kahypar_preset_type_t preset) {
-  return lib::check_compatibility(partitioned_hg, preset);
+  return lib::is_compatible(partitioned_hg, preset);
 }
 
-void mt_kahypar_improve_partition(mt_kahypar_partitioned_hypergraph_t partitioned_hg,
-                                  mt_kahypar_context_t* context,
-                                  const size_t num_vcycles) {
-  Context& c = *reinterpret_cast<Context*>(context);
-  if ( lib::check_if_all_relavant_parameters_are_set(c) ) {
-    if ( mt_kahypar_check_partition_compatibility(
-          partitioned_hg, lib::get_preset_c_type(c.partition.preset_type)) ) {
-      c.partition.instance_type = lib::get_instance_type(partitioned_hg);
-      c.partition.partition_type = to_partition_c_type(
-        c.partition.preset_type, c.partition.instance_type);
-      lib::prepare_context(c);
-      c.partition.num_vcycles = num_vcycles;
-      try {
-        PartitionerFacade::improve(partitioned_hg, c);
-      } catch ( std::exception& ex ) {
-        LOG << ex.what();
-      }
-    } else {
-      WARNING(lib::incompatibility_description(partitioned_hg));
-    }
+mt_kahypar_status_t mt_kahypar_improve_partition(mt_kahypar_partitioned_hypergraph_t partitioned_hg,
+                                                 const mt_kahypar_context_t* context,
+                                                 const size_t num_vcycles,
+                                                 mt_kahypar_error_t* error) {
+  try {
+    lib::improve(partitioned_hg, reinterpret_cast<const Context&>(*context), num_vcycles);
+    return mt_kahypar_status_t::SUCCESS;
+  } catch ( std::exception& ex ) {
+    *error = to_error(ex);
+    return error->status;
   }
 }
 
-void mt_kahypar_improve_mapping(mt_kahypar_partitioned_hypergraph_t partitioned_hg,
+mt_kahypar_status_t mt_kahypar_improve_mapping(mt_kahypar_partitioned_hypergraph_t partitioned_hg,
                                 mt_kahypar_target_graph_t* target_graph,
-                                mt_kahypar_context_t* context,
-                                const size_t num_vcycles) {
-  Context& c = *reinterpret_cast<Context*>(context);
-  if ( lib::check_if_all_relavant_parameters_are_set(c) ) {
-    if ( mt_kahypar_check_partition_compatibility(
-          partitioned_hg, lib::get_preset_c_type(c.partition.preset_type)) ) {
-      c.partition.instance_type = lib::get_instance_type(partitioned_hg);
-      c.partition.partition_type = to_partition_c_type(
-        c.partition.preset_type, c.partition.instance_type);
-      lib::prepare_context(c);
-      c.partition.num_vcycles = num_vcycles;
-      c.partition.objective = Objective::steiner_tree;
-      TargetGraph* target = reinterpret_cast<TargetGraph*>(target_graph);
-      try {
-        PartitionerFacade::improve(partitioned_hg, c, target);
-      } catch ( std::exception& ex ) {
-        LOG << ex.what();
-      }
-    } else {
-      WARNING(lib::incompatibility_description(partitioned_hg));
-    }
+                                               const mt_kahypar_context_t* context,
+                                               const size_t num_vcycles,
+                                               mt_kahypar_error_t* error) {
+  try {
+    lib::improve_mapping(partitioned_hg,
+                         reinterpret_cast<TargetGraph&>(*target_graph),
+                         reinterpret_cast<const Context&>(*context),
+                         num_vcycles);
+    return mt_kahypar_status_t::SUCCESS;
+  } catch ( std::exception& ex ) {
+    *error = to_error(ex);
+    return error->status;
   }
 }
 
 mt_kahypar_partitioned_hypergraph_t mt_kahypar_create_partitioned_hypergraph(mt_kahypar_hypergraph_t hypergraph,
-                                                                             const mt_kahypar_preset_type_t preset,
+                                                                             const mt_kahypar_context_t* context,
                                                                              const mt_kahypar_partition_id_t num_blocks,
-                                                                             const mt_kahypar_partition_id_t* partition) {
-  if ( hypergraph.type == STATIC_GRAPH || hypergraph.type == DYNAMIC_GRAPH ) {
-    switch ( preset ) {
-      case LARGE_K:
-      case DETERMINISTIC:
-      case DEFAULT:
-      case QUALITY:
-        ASSERT(hypergraph.type == STATIC_GRAPH);
-        return lib::create_partitoned_hypergraph<StaticPartitionedGraph>(
-          utils::cast<ds::StaticGraph>(hypergraph), num_blocks, partition);
-      case HIGHEST_QUALITY:
-        ASSERT(hypergraph.type == DYNAMIC_GRAPH);
-        return lib::create_partitoned_hypergraph<DynamicPartitionedGraph>(
-          utils::cast<ds::DynamicGraph>(hypergraph), num_blocks, partition);
-    }
-  } else {
-    switch ( preset ) {
-      case LARGE_K:
-        ASSERT(hypergraph.type == STATIC_HYPERGRAPH);
-        return lib::create_partitoned_hypergraph<SparsePartitionedHypergraph>(
-          utils::cast<ds::StaticHypergraph>(hypergraph), num_blocks, partition);
-      case DETERMINISTIC:
-      case DEFAULT:
-      case QUALITY:
-        ASSERT(hypergraph.type == STATIC_HYPERGRAPH);
-        return lib::create_partitoned_hypergraph<StaticPartitionedHypergraph>(
-          utils::cast<ds::StaticHypergraph>(hypergraph), num_blocks, partition);
-      case HIGHEST_QUALITY:
-        ASSERT(hypergraph.type == DYNAMIC_HYPERGRAPH);
-        return lib::create_partitoned_hypergraph<DynamicPartitionedHypergraph>(
-          utils::cast<ds::DynamicHypergraph>(hypergraph), num_blocks, partition);
-    }
+                                                                             const mt_kahypar_partition_id_t* partition,
+                                                                             mt_kahypar_error_t* error) {
+  const Context& c = reinterpret_cast<const Context&>(*context);
+  try {
+    return lib::create_partitioned_hypergraph(hypergraph, c, num_blocks, partition);
+  } catch ( std::exception& ex ) {
+    *error = to_error(ex);
   }
   return mt_kahypar_partitioned_hypergraph_t { nullptr, NULLPTR_PARTITION };
 }
 
 mt_kahypar_partitioned_hypergraph_t mt_kahypar_read_partition_from_file(mt_kahypar_hypergraph_t hypergraph,
-                                                                        const mt_kahypar_preset_type_t preset,
+                                                                        const mt_kahypar_context_t* context,
                                                                         const mt_kahypar_partition_id_t num_blocks,
-                                                                        const char* partition_file) {
+                                                                        const char* partition_file,
+                                                                        mt_kahypar_error_t* error) {
   std::vector<PartitionID> partition;
-  io::readPartitionFile(partition_file, partition);
-  return mt_kahypar_create_partitioned_hypergraph(hypergraph, preset, num_blocks, partition.data());
-}
-
-void mt_kahypar_write_partition_to_file(const mt_kahypar_partitioned_hypergraph_t partitioned_hg,
-                                        const char* partition_file) {
-  switch ( partitioned_hg.type ) {
-    case MULTILEVEL_GRAPH_PARTITIONING:
-      io::writePartitionFile(utils::cast<StaticPartitionedGraph>(partitioned_hg), partition_file); break;
-    case N_LEVEL_GRAPH_PARTITIONING:
-      io::writePartitionFile(utils::cast<DynamicPartitionedGraph>(partitioned_hg), partition_file); break;
-    case MULTILEVEL_HYPERGRAPH_PARTITIONING:
-      io::writePartitionFile(utils::cast<StaticPartitionedHypergraph>(partitioned_hg), partition_file); break;
-    case N_LEVEL_HYPERGRAPH_PARTITIONING:
-      io::writePartitionFile(utils::cast<DynamicPartitionedHypergraph>(partitioned_hg), partition_file); break;
-    case LARGE_K_PARTITIONING:
-      io::writePartitionFile(utils::cast<SparsePartitionedHypergraph>(partitioned_hg), partition_file); break;
-    case NULLPTR_PARTITION: break;
+  const Context& c = reinterpret_cast<const Context&>(*context);
+  try {
+    io::readPartitionFile(partition_file, mt_kahypar_num_hypernodes(hypergraph), partition);
+    return lib::create_partitioned_hypergraph(hypergraph, c, num_blocks, partition.data());
+  } catch ( std::exception& ex ) {
+    *error = to_error(ex);
   }
+  return mt_kahypar_partitioned_hypergraph_t { nullptr, NULLPTR_PARTITION };
 }
 
-void mt_kahypar_get_partition(const mt_kahypar_partitioned_hypergraph_t partitioned_hg,
-                              mt_kahypar_partition_id_t* partition) {
-  switch ( partitioned_hg.type ) {
-    case MULTILEVEL_GRAPH_PARTITIONING:
-      lib::get_partition(utils::cast<StaticPartitionedGraph>(partitioned_hg), partition); break;
-    case N_LEVEL_GRAPH_PARTITIONING:
-      lib::get_partition(utils::cast<DynamicPartitionedGraph>(partitioned_hg), partition); break;
-    case MULTILEVEL_HYPERGRAPH_PARTITIONING:
-      lib::get_partition(utils::cast<StaticPartitionedHypergraph>(partitioned_hg), partition); break;
-    case N_LEVEL_HYPERGRAPH_PARTITIONING:
-      lib::get_partition(utils::cast<DynamicPartitionedHypergraph>(partitioned_hg), partition); break;
-    case LARGE_K_PARTITIONING:
-      lib::get_partition(utils::cast<SparsePartitionedHypergraph>(partitioned_hg), partition); break;
-    case NULLPTR_PARTITION: break;
+mt_kahypar_status_t mt_kahypar_write_partition_to_file(const mt_kahypar_partitioned_hypergraph_t partitioned_hg,
+                                                       const char* partition_file,
+                                                       mt_kahypar_error_t* error) {
+  try {
+    lib::write_partition_to_file(partitioned_hg, partition_file);
+    return mt_kahypar_status_t::SUCCESS;
+  } catch ( std::exception& ex ) {
+    *error = to_error(ex);
+    return error->status;
   }
 }
 
-void mt_kahypar_get_block_weights(const mt_kahypar_partitioned_hypergraph_t partitioned_hg,
-                                  mt_kahypar_hypernode_weight_t* block_weights) {
-  switch ( partitioned_hg.type ) {
-    case MULTILEVEL_GRAPH_PARTITIONING:
-      lib::get_block_weights(utils::cast<StaticPartitionedGraph>(partitioned_hg), block_weights); break;
-    case N_LEVEL_GRAPH_PARTITIONING:
-      lib::get_block_weights(utils::cast<DynamicPartitionedGraph>(partitioned_hg), block_weights); break;
-    case MULTILEVEL_HYPERGRAPH_PARTITIONING:
-      lib::get_block_weights(utils::cast<StaticPartitionedHypergraph>(partitioned_hg), block_weights); break;
-    case N_LEVEL_HYPERGRAPH_PARTITIONING:
-      lib::get_block_weights(utils::cast<DynamicPartitionedHypergraph>(partitioned_hg), block_weights); break;
-    case LARGE_K_PARTITIONING:
-      lib::get_block_weights(utils::cast<SparsePartitionedHypergraph>(partitioned_hg), block_weights); break;
-    case NULLPTR_PARTITION: break;
-  }
+
+mt_kahypar_partition_id_t mt_kahypar_num_blocks(const mt_kahypar_partitioned_hypergraph_t partitioned_hg) {
+  return lib::num_blocks(partitioned_hg);
 }
 
-double mt_kahypar_imbalance(const mt_kahypar_partitioned_hypergraph_t partitioned_hg,
-                            const mt_kahypar_context_t* context) {
-  const Context& c = *reinterpret_cast<const Context*>(context);
-  switch ( partitioned_hg.type ) {
-    case MULTILEVEL_GRAPH_PARTITIONING:
-      return metrics::imbalance(utils::cast_const<StaticPartitionedGraph>(partitioned_hg), c);
-    case N_LEVEL_GRAPH_PARTITIONING:
-      return metrics::imbalance(utils::cast_const<DynamicPartitionedGraph>(partitioned_hg), c);
-    case MULTILEVEL_HYPERGRAPH_PARTITIONING:
-      return metrics::imbalance(utils::cast_const<StaticPartitionedHypergraph>(partitioned_hg), c);
-    case N_LEVEL_HYPERGRAPH_PARTITIONING:
-      return metrics::imbalance(utils::cast_const<DynamicPartitionedHypergraph>(partitioned_hg), c);
-    case LARGE_K_PARTITIONING:
-      return metrics::imbalance(utils::cast_const<SparsePartitionedHypergraph>(partitioned_hg), c);
-    case NULLPTR_PARTITION: return 0;
-  }
-  return 0;
+mt_kahypar_hypernode_weight_t mt_kahypar_block_weight(const mt_kahypar_partitioned_hypergraph_t partitioned_hg,
+                                                      const mt_kahypar_partition_id_t block) {
+  return lib::block_weight(partitioned_hg, block);
+}
+
+mt_kahypar_partition_id_t mt_kahypar_block_id(const mt_kahypar_partitioned_hypergraph_t partitioned_hg,
+                                              const mt_kahypar_hypernode_id_t node) {
+  return lib::block_id(partitioned_hg, node);
+}
+
+mt_kahypar_hyperedge_id_t mt_kahypar_num_incident_cut_hyperedges(const mt_kahypar_partitioned_hypergraph_t partitioned_hg,
+                                                                 const mt_kahypar_hypernode_id_t node) {
+  return lib::num_incident_cut_edges(partitioned_hg, node);
+}
+
+mt_kahypar_partition_id_t mt_kahypar_connectivity(const mt_kahypar_partitioned_hypergraph_t partitioned_hg,
+                                                  const mt_kahypar_hyperedge_id_t edge) {
+  return lib::connectivity(partitioned_hg, edge);
+}
+
+mt_kahypar_hypernode_id_t mt_kahypar_num_pins_in_block(const mt_kahypar_partitioned_hypergraph_t partitioned_hg,
+                                                       const mt_kahypar_hyperedge_id_t edge,
+                                                       const mt_kahypar_partition_id_t block) {
+  return lib::num_pins_in_block(partitioned_hg, edge, block);
+}
+
+void mt_kahypar_get_partition(const mt_kahypar_partitioned_hypergraph_t partitioned_hg,
+                              mt_kahypar_partition_id_t* partition) {
+  lib::get_partition(partitioned_hg, partition);
+}
+
+void mt_kahypar_get_block_weights(const mt_kahypar_partitioned_hypergraph_t partitioned_hg,
+                                  mt_kahypar_hypernode_weight_t* block_weights) {
+  lib::get_block_weights(partitioned_hg, block_weights);
+}
+
+double mt_kahypar_imbalance(const mt_kahypar_partitioned_hypergraph_t partitioned_hg,
+                            const mt_kahypar_context_t* context) {
+  return lib::imbalance(partitioned_hg, *reinterpret_cast<const Context*>(context));
 }
 
 mt_kahypar_hyperedge_weight_t mt_kahypar_cut(const mt_kahypar_partitioned_hypergraph_t partitioned_hg) {
-  switch ( partitioned_hg.type ) {
-    case MULTILEVEL_GRAPH_PARTITIONING:
-      return metrics::quality(utils::cast<StaticPartitionedGraph>(partitioned_hg), Objective::cut);
-    case N_LEVEL_GRAPH_PARTITIONING:
-      return metrics::quality(utils::cast<DynamicPartitionedGraph>(partitioned_hg), Objective::cut);
-    case MULTILEVEL_HYPERGRAPH_PARTITIONING:
-      return metrics::quality(utils::cast<StaticPartitionedHypergraph>(partitioned_hg), Objective::cut);
-    case N_LEVEL_HYPERGRAPH_PARTITIONING:
-      return metrics::quality(utils::cast<DynamicPartitionedHypergraph>(partitioned_hg), Objective::cut);
-    case LARGE_K_PARTITIONING:
-      return metrics::quality(utils::cast<SparsePartitionedHypergraph>(partitioned_hg), Objective::cut);
-    case NULLPTR_PARTITION: return 0;
-  }
-  return 0;
+  return lib::cut(partitioned_hg);
 }
 
 mt_kahypar_hyperedge_weight_t mt_kahypar_km1(const mt_kahypar_partitioned_hypergraph_t partitioned_hg) {
-  switch ( partitioned_hg.type ) {
-    case MULTILEVEL_GRAPH_PARTITIONING:
-      return metrics::quality(utils::cast<StaticPartitionedGraph>(partitioned_hg), Objective::km1);
-    case N_LEVEL_GRAPH_PARTITIONING:
-      return metrics::quality(utils::cast<DynamicPartitionedGraph>(partitioned_hg), Objective::km1);
-    case MULTILEVEL_HYPERGRAPH_PARTITIONING:
-      return metrics::quality(utils::cast<StaticPartitionedHypergraph>(partitioned_hg), Objective::km1);
-    case N_LEVEL_HYPERGRAPH_PARTITIONING:
-      return metrics::quality(utils::cast<DynamicPartitionedHypergraph>(partitioned_hg), Objective::km1);
-    case LARGE_K_PARTITIONING:
-      return metrics::quality(utils::cast<SparsePartitionedHypergraph>(partitioned_hg), Objective::km1);
-    case NULLPTR_PARTITION: return 0;
-  }
-  return 0;
+  return lib::km1(partitioned_hg);
 }
 
 mt_kahypar_hyperedge_weight_t mt_kahypar_soed(const mt_kahypar_partitioned_hypergraph_t partitioned_hg) {
-  switch ( partitioned_hg.type ) {
-    case MULTILEVEL_GRAPH_PARTITIONING:
-      return metrics::quality(utils::cast<StaticPartitionedGraph>(partitioned_hg), Objective::soed);
-    case N_LEVEL_GRAPH_PARTITIONING:
-      return metrics::quality(utils::cast<DynamicPartitionedGraph>(partitioned_hg), Objective::soed);
-    case MULTILEVEL_HYPERGRAPH_PARTITIONING:
-      return metrics::quality(utils::cast<StaticPartitionedHypergraph>(partitioned_hg), Objective::soed);
-    case N_LEVEL_HYPERGRAPH_PARTITIONING:
-      return metrics::quality(utils::cast<DynamicPartitionedHypergraph>(partitioned_hg), Objective::soed);
-    case LARGE_K_PARTITIONING:
-      return metrics::quality(utils::cast<SparsePartitionedHypergraph>(partitioned_hg), Objective::soed);
-    case NULLPTR_PARTITION: return 0;
-  }
-  return 0;
+  return lib::soed(partitioned_hg);
 }
 
 mt_kahypar_hyperedge_weight_t mt_kahypar_steiner_tree(const mt_kahypar_partitioned_hypergraph_t partitioned_hg,
@@ -701,39 +666,8 @@ mt_kahypar_hyperedge_weight_t mt_kahypar_steiner_tree(const mt_kahypar_partition
   if ( !target->isInitialized() ) {
     target->precomputeDistances(4);
   }
-
-  switch ( partitioned_hg.type ) {
-    case MULTILEVEL_GRAPH_PARTITIONING:
-      {
-        StaticPartitionedGraph& phg = utils::cast<StaticPartitionedGraph>(partitioned_hg);
-        phg.setTargetGraph(target);
-        return metrics::quality(phg, Objective::steiner_tree);
-      }
-    case N_LEVEL_GRAPH_PARTITIONING:
-      {
-        DynamicPartitionedGraph& phg = utils::cast<DynamicPartitionedGraph>(partitioned_hg);
-        phg.setTargetGraph(target);
-        return metrics::quality(phg, Objective::steiner_tree);
-      }
-    case MULTILEVEL_HYPERGRAPH_PARTITIONING:
-      {
-        StaticPartitionedHypergraph& phg = utils::cast<StaticPartitionedHypergraph>(partitioned_hg);
-        phg.setTargetGraph(target);
-        return metrics::quality(phg, Objective::steiner_tree);
-      }
-    case N_LEVEL_HYPERGRAPH_PARTITIONING:
-      {
-        DynamicPartitionedHypergraph& phg = utils::cast<DynamicPartitionedHypergraph>(partitioned_hg);
-        phg.setTargetGraph(target);
-        return metrics::quality(phg, Objective::steiner_tree);
-      }
-    case LARGE_K_PARTITIONING:
-      {
-        SparsePartitionedHypergraph& phg = utils::cast<SparsePartitionedHypergraph>(partitioned_hg);
-        phg.setTargetGraph(target);
-        return metrics::quality(phg, Objective::steiner_tree);
-      }
-    case NULLPTR_PARTITION: return 0;
-  }
-  return 0;
-}
\ No newline at end of file
+  return lib::switch_phg(partitioned_hg, [&](auto& phg) {
+    phg.setTargetGraph(target);
+    return metrics::quality(phg, Objective::steiner_tree);
+  });
+}
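The data-structure changes below are a mechanical rename of `NonSupportedOperationException` to `UnsupportedOperationException`, which ties into the `to_error` mapping above: such exceptions surface at the C boundary as `UNSUPPORTED_OPERATION`. A sketch of how a caller might observe this (assuming the status constant is a plain enum value in the C header):

```cpp
mt_kahypar_error_t error{};
mt_kahypar_partitioned_hypergraph_t phg =
  mt_kahypar_partition(hypergraph, context, &error);
if (phg.partitioned_hg == nullptr && error.status == UNSUPPORTED_OPERATION) {
  std::cerr << error.msg << std::endl;  // e.g. "Not supported for graphs"
  mt_kahypar_free_error_content(&error);
}
```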
diff --git a/mt-kahypar/datastructures/delta_partitioned_graph.h b/mt-kahypar/datastructures/delta_partitioned_graph.h
index 9c1f5e5bf..c3f5c0738 100644
--- a/mt-kahypar/datastructures/delta_partitioned_graph.h
+++ b/mt-kahypar/datastructures/delta_partitioned_graph.h
@@ -265,19 +265,19 @@ class DeltaPartitionedGraph {
 
   // ! Returns an iterator over the connectivity set of hyperedge he (not supported)
   IteratorRange connectivitySet(const HyperedgeID e) const {
-    throw NonSupportedOperationException("Not supported for graphs");
+    throw UnsupportedOperationException("Not supported for graphs");
     return _dummy_connectivity_set.connectivitySet(e);
   }
 
   // ! Returns the number of blocks contained in hyperedge he (not supported)
   PartitionID connectivity(const HyperedgeID e) const {
-    throw NonSupportedOperationException("Not supported for graphs");
+    throw UnsupportedOperationException("Not supported for graphs");
     return _dummy_connectivity_set.connectivity(e);
   }
 
   // ! Creates a deep copy of the connectivity set of hyperedge he (not supported)
   Bitset& deepCopyOfConnectivitySet(const HyperedgeID he) const {
-    throw NonSupportedOperationException("Not supported for graphs");
+    throw UnsupportedOperationException("Not supported for graphs");
     return _dummy_connectivity_set.deepCopy(he);
   }
 
diff --git a/mt-kahypar/datastructures/dynamic_graph.h b/mt-kahypar/datastructures/dynamic_graph.h
index 0f91bf8a6..a55c0c701 100644
--- a/mt-kahypar/datastructures/dynamic_graph.h
+++ b/mt-kahypar/datastructures/dynamic_graph.h
@@ -599,7 +599,7 @@ class DynamicGraph {
 
   // ! Enables a hyperedge (must be disabled before)
   void enableHyperedge(const HyperedgeID) {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "enableHyperedge() is not supported in dynamic graph");
   }
 
@@ -682,7 +682,7 @@ class DynamicGraph {
   DynamicGraph contract(parallel::scalable_vector<HypernodeID>&,
                         bool deterministic = false) {
     unused(deterministic);
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "contract(c, id) is not supported in dynamic graph");
     return DynamicGraph();
   }
 
@@ -768,7 +768,7 @@ class DynamicGraph {
    * (Not supported.)
    */
   void removeEdge(const HyperedgeID) {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "removeEdge is not supported in dynamic graph");
   }
 
@@ -776,7 +776,7 @@ class DynamicGraph {
    * (Not supported.)
    */
   void removeLargeEdge(const HyperedgeID) {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
      "removeLargeEdge is not supported in dynamic graph");
   }
 
@@ -784,7 +784,7 @@ class DynamicGraph {
    * (Not supported.)
    */
   void restoreLargeEdge(const HyperedgeID&) {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "restoreLargeEdge is not supported in dynamic graph");
   }
 
@@ -821,7 +821,7 @@ class DynamicGraph {
   }
 
   void freeTmpContractionBuffer() {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "freeTmpContractionBuffer() is not supported in dynamic hypergraph");
   }
 
diff --git a/mt-kahypar/datastructures/dynamic_hypergraph.h b/mt-kahypar/datastructures/dynamic_hypergraph.h
index e1ba6693e..814caf4cb 100644
--- a/mt-kahypar/datastructures/dynamic_hypergraph.h
+++ b/mt-kahypar/datastructures/dynamic_hypergraph.h
@@ -776,7 +776,7 @@ class DynamicHypergraph {
   DynamicHypergraph contract(parallel::scalable_vector<HypernodeID>&,
                              bool deterministic = false) {
     unused(deterministic);
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "contract(c, id) is not supported in dynamic hypergraph");
     return DynamicHypergraph();
   }
 
@@ -952,7 +952,7 @@ class DynamicHypergraph {
   }
 
   void freeTmpContractionBuffer() {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "freeTmpContractionBuffer() is not supported in dynamic hypergraph");
   }
 
diff --git a/mt-kahypar/datastructures/partitioned_hypergraph.h b/mt-kahypar/datastructures/partitioned_hypergraph.h
index dc2743508..47828b649 100644
--- a/mt-kahypar/datastructures/partitioned_hypergraph.h
+++ b/mt-kahypar/datastructures/partitioned_hypergraph.h
@@ -385,21 +385,21 @@ class PartitionedHypergraph {
 
   // ! Target of an edge
   HypernodeID edgeTarget(const HyperedgeID) const {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "edgeTarget(e) is only supported on graph data structure");
     return kInvalidHypernode;
   }
 
   // ! Source of an edge
   HypernodeID edgeSource(const HyperedgeID) const {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "edgeSource(e) is only supported on graph data structure");
     return kInvalidHypernode;
   }
 
   // ! Whether the edge is a single pin edge
   bool isSinglePin(const HyperedgeID) const {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "isSinglePin(e) is only supported on graph data structure");
     return false;
   }
 
diff --git a/mt-kahypar/datastructures/static_graph.h b/mt-kahypar/datastructures/static_graph.h
index 43ba585f5..dfd41c4ef 100644
--- a/mt-kahypar/datastructures/static_graph.h
+++ b/mt-kahypar/datastructures/static_graph.h
@@ -702,7 +702,7 @@ class StaticGraph {
 
   // ! Enables a hyperedge (must be disabled before)
   void enableHyperedge(const HyperedgeID) {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "enableHyperedge() is not supported in static graph");
   }
 
@@ -769,7 +769,7 @@ class StaticGraph {
   StaticGraph contract(parallel::scalable_vector<HypernodeID>& communities, bool deterministic = false);
 
   bool registerContraction(const HypernodeID, const HypernodeID) {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
      "registerContraction(u, v) is not supported in static graph");
     return false;
   }
 
@@ -777,7 +777,7 @@ class StaticGraph {
   size_t contract(const HypernodeID,
                   const HypernodeWeight max_node_weight = std::numeric_limits<HypernodeWeight>::max()) {
     unused(max_node_weight);
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "contract(v, max_node_weight) is not supported in static graph");
     return 0;
   }
 
@@ -789,12 +789,12 @@ class StaticGraph {
     unused(mark_edge);
     unused(case_one_func);
     unused(case_two_func);
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "uncontract(batch) is not supported in static graph");
   }
 
   VersionedBatchVector createBatchUncontractionHierarchy(const size_t) {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "createBatchUncontractionHierarchy(batch_size) is not supported in static graph");
     return { };
   }
 
@@ -810,7 +810,7 @@ class StaticGraph {
    * setting.
    */
   void removeLargeEdge(const HyperedgeID) {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "removeLargeEdge() is not supported in static graph");
   }
 
@@ -818,18 +818,18 @@ class StaticGraph {
   * Restores a large hyperedge previously removed from the hypergraph.
   */
   void restoreLargeEdge(const HyperedgeID&) {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "restoreLargeEdge() is not supported in static graph");
   }
 
   parallel::scalable_vector<ParallelHyperedge> removeSinglePinAndParallelHyperedges() {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "removeSinglePinAndParallelHyperedges() is not supported in static graph");
     return { };
   }
 
   void restoreSinglePinAndParallelNets(const parallel::scalable_vector<ParallelHyperedge>&) {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "restoreSinglePinAndParallelNets(hes_to_restore) is not supported in static graph");
   }
 
@@ -877,7 +877,7 @@ class StaticGraph {
 
   // ! Only for testing
   bool verifyIncidenceArrayAndIncidentNets() {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "verifyIncidenceArrayAndIncidentNets() not supported in static graph");
     return false;
   }
 
diff --git a/mt-kahypar/datastructures/static_graph_factory.h b/mt-kahypar/datastructures/static_graph_factory.h
index 5610cf5ea..3a0b27e8b 100644
--- a/mt-kahypar/datastructures/static_graph_factory.h
+++ b/mt-kahypar/datastructures/static_graph_factory.h
@@ -64,7 +64,7 @@ class StaticGraphFactory {
     const bool stable_construction_of_incident_edges = false);
 
   static std::pair<StaticGraph, parallel::scalable_vector<HypernodeID>> compactify(const StaticGraph&) {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "Compactify not implemented for static graph.");
   }
 
diff --git a/mt-kahypar/datastructures/static_hypergraph.h b/mt-kahypar/datastructures/static_hypergraph.h
index 9d152d2f6..38f6d1439 100644
--- a/mt-kahypar/datastructures/static_hypergraph.h
+++ b/mt-kahypar/datastructures/static_hypergraph.h
@@ -741,7 +741,7 @@ class StaticHypergraph {
   StaticHypergraph contract(parallel::scalable_vector<HypernodeID>& communities, bool deterministic = false);
 
   bool registerContraction(const HypernodeID, const HypernodeID) {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "registerContraction(u, v) is not supported in static hypergraph");
     return false;
   }
 
@@ -749,7 +749,7 @@ class StaticHypergraph {
   size_t contract(const HypernodeID,
                   const HypernodeWeight max_node_weight = std::numeric_limits<HypernodeWeight>::max()) {
     unused(max_node_weight);
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "contract(v, max_node_weight) is not supported in static hypergraph");
     return 0;
   }
 
@@ -759,12 +759,12 @@ class StaticHypergraph {
                   const UncontractionFunction& case_two_func = NOOP_BATCH_FUNC) {
     unused(case_one_func);
     unused(case_two_func);
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "uncontract(batch) is not supported in static hypergraph");
   }
 
   VersionedBatchVector createBatchUncontractionHierarchy(const size_t) {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "createBatchUncontractionHierarchy(batch_size) is not supported in static hypergraph");
     return { };
   }
 
@@ -821,13 +821,13 @@ class StaticHypergraph {
   }
 
   parallel::scalable_vector<ParallelHyperedge> removeSinglePinAndParallelHyperedges() {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
      "removeSinglePinAndParallelHyperedges() is not supported in static hypergraph");
     return { };
   }
 
   void restoreSinglePinAndParallelNets(const parallel::scalable_vector<ParallelHyperedge>&) {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "restoreSinglePinAndParallelNets(hes_to_restore) is not supported in static hypergraph");
   }
 
@@ -875,7 +875,7 @@ class StaticHypergraph {
 
   // ! Only for testing
   bool verifyIncidenceArrayAndIncidentNets() {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "verifyIncidenceArrayAndIncidentNets() not supported in static hypergraph");
     return false;
   }
 
diff --git a/mt-kahypar/datastructures/static_hypergraph_factory.h b/mt-kahypar/datastructures/static_hypergraph_factory.h
index 6e9cde25c..bb08bc003 100644
--- a/mt-kahypar/datastructures/static_hypergraph_factory.h
+++ b/mt-kahypar/datastructures/static_hypergraph_factory.h
@@ -52,7 +52,7 @@ class StaticHypergraphFactory {
     const bool stable_construction_of_incident_edges = false);
 
   static std::pair<StaticHypergraph, parallel::scalable_vector<HypernodeID>> compactify(const StaticHypergraph&) {
-    throw NonSupportedOperationException(
+    throw UnsupportedOperationException(
       "Compactify not implemented for static hypergraph.");
   }
 
diff --git a/mt-kahypar/io/command_line_options.cpp b/mt-kahypar/io/command_line_options.cpp
index 2542f1394..8c7688a5f 100644
--- a/mt-kahypar/io/command_line_options.cpp
+++ b/mt-kahypar/io/command_line_options.cpp
@@ -898,7 +898,7 @@ namespace mt_kahypar {
   }
 
 
-  void parseIniToContext(Context& context, const std::string& ini_filename) {
+  void parseIniToContext(Context& context, const std::string& ini_filename, bool disable_verbose_output) {
     std::ifstream file(ini_filename.c_str());
     if (!file) {
       throw InvalidInputException(
@@ -910,13 +917,20 @@ namespace mt_kahypar {
     po::store(po::parse_config_file(file, ini_line_options, false), cmd_vm);
     po::notify(cmd_vm);
 
+    if (disable_verbose_output) {
+      bool verbose_is_manually_set = !cmd_vm.find("verbose")->second.defaulted();
+      if (!verbose_is_manually_set) {
+        context.partition.verbose_output = false;
+      }
+    }
     if (context.partition.deterministic) {
       context.preprocessing.stable_construction_of_incident_edges = true;
     }
   }
 
-  void presetToContext(Context& context, const std::vector