Skip to content

Commit 0375e3c

Browse files
committed
Fix window::size() semantics, standardize comm_ naming, simplify tests
Address remaining inconsistencies and improve code clarity:
- Standardize communicator member variable naming from com_ to comm_ across all classes for consistency with mpi::window
- Fix window::size() semantics to return element count instead of bytes, matching constructor parameter units
- Simplify test code in mpi_window.cpp:
  * Use structured bindings and modern C++ patterns
  * Remove redundant variables and computations
  * Clearer comments and better use of mpi::chunk
- Fix window::owned_ member initialization ordering to correctly handle both MPI and non-MPI cases
- Remove redundant forward declaration of shared_window
- Simplify get_communicator() implementations to return comm_ directly
- Update build_multi_node.yml workflow with improvements from app4triqs
- Minor documentation clarifications in group and window classes
1 parent b7d152b commit 0375e3c

File tree

5 files changed

+80
-99
lines changed

5 files changed

+80
-99
lines changed

.github/workflows/build_multi_node.yml

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,10 @@
1-
name: build
1+
name: build_multi_node
22

33
on:
44
push:
5-
branches: [ unstable ]
5+
branches: [ unstable, '[0-9]+.[0-9]+.x' ]
66
pull_request:
7-
branches: [ unstable ]
7+
branches: [ unstable, '[0-9]+.[0-9]+.x' ]
88
workflow_call:
99
workflow_dispatch:
1010

@@ -33,8 +33,8 @@ jobs:
3333
- uses: actions/cache/restore@v4
3434
with:
3535
path: ${{ env.CCACHE_DIR }}
36-
key: ccache-${{ matrix.os }}-${{ matrix.cc }}-${{ github.run_id }}
37-
restore-keys:
36+
key: ccache-${{ matrix.os }}-${{ matrix.cc }}-${{ github.run_id }}-${{ github.run_attempt }}
37+
restore-keys: |
3838
ccache-${{ matrix.os }}-${{ matrix.cc }}-
3939
4040
- name: Install ubuntu dependencies
@@ -61,7 +61,7 @@ jobs:
6161
cmake --build build/ --target test
6262
'
6363
64-
- name: Run tests inside the container
64+
- name: Run multi-node window tests
6565
run: |
6666
docker exec -t -u runner -w ${{ github.workspace }} docker-vm-1 /bin/bash -euxc '
6767
cat <<EOF | tee hostfile
@@ -85,4 +85,4 @@ jobs:
8585
if: always()
8686
with:
8787
path: ${{ env.CCACHE_DIR }}
88-
key: ccache-${{ matrix.os }}-${{ matrix.cc }}-${{ github.run_id }}
88+
key: ccache-${{ matrix.os }}-${{ matrix.cc }}-${{ github.run_id }}-${{ github.run_attempt }}

c++/mpi/communicator.hpp

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -55,21 +55,21 @@ namespace mpi {
5555
* @details The `MPI_Comm` object is copied without calling `MPI_Comm_dup`.
5656
* @param c `MPI_Comm` object to wrap.
5757
*/
58-
communicator(MPI_Comm c) : com_(c) {}
58+
communicator(MPI_Comm c) : comm_(c) {}
5959

6060
/// Get the wrapped `MPI_Comm` object.
61-
[[nodiscard]] MPI_Comm get() const noexcept { return com_; }
61+
[[nodiscard]] MPI_Comm get() const noexcept { return comm_; }
6262

6363
/// Check if the contained `MPI_Comm` is `MPI_COMM_NULL`.
64-
[[nodiscard]] bool is_null() const noexcept { return com_ == MPI_COMM_NULL; }
64+
[[nodiscard]] bool is_null() const noexcept { return comm_ == MPI_COMM_NULL; }
6565

6666
/**
6767
* @brief Get the rank of the calling process in the communicator.
6868
* @return The result of `MPI_Comm_rank` if mpi::has_env is true, otherwise 0.
6969
*/
7070
[[nodiscard]] int rank() const {
7171
int r = 0;
72-
if (has_env) check_mpi_call(MPI_Comm_rank(com_, &r), "MPI_Comm_rank");
72+
if (has_env) check_mpi_call(MPI_Comm_rank(comm_, &r), "MPI_Comm_rank");
7373
return r;
7474
}
7575

@@ -79,7 +79,7 @@ namespace mpi {
7979
*/
8080
[[nodiscard]] int size() const {
8181
int s = 1;
82-
if (has_env) check_mpi_call(MPI_Comm_size(com_, &s), "MPI_Comm_size");
82+
if (has_env) check_mpi_call(MPI_Comm_size(comm_, &s), "MPI_Comm_size");
8383
return s;
8484
}
8585

@@ -100,7 +100,7 @@ namespace mpi {
100100
*/
101101
[[nodiscard]] communicator split(int color, int key = 0) const {
102102
communicator c{};
103-
if (has_env) check_mpi_call(MPI_Comm_split(com_, color, key, &c.com_), "MPI_Comm_split");
103+
if (has_env) check_mpi_call(MPI_Comm_split(comm_, color, key, &c.comm_), "MPI_Comm_split");
104104
return c;
105105
}
106106

@@ -134,7 +134,7 @@ namespace mpi {
134134
*/
135135
[[nodiscard]] communicator duplicate() const {
136136
communicator c{};
137-
if (has_env) check_mpi_call(MPI_Comm_dup(com_, &c.com_), "MPI_Comm_dup");
137+
if (has_env) check_mpi_call(MPI_Comm_dup(comm_, &c.comm_), "MPI_Comm_dup");
138138
return c;
139139
}
140140

@@ -148,7 +148,7 @@ namespace mpi {
148148
* Does nothing, if mpi::has_env is false.
149149
*/
150150
void free() {
151-
if (has_env && !is_null()) check_mpi_call(MPI_Comm_free(&com_), "MPI_Comm_free");
151+
if (has_env && !is_null()) check_mpi_call(MPI_Comm_free(&comm_), "MPI_Comm_free");
152152
}
153153

154154
/**
@@ -157,16 +157,16 @@ namespace mpi {
157157
*/
158158
void abort(int error_code) const {
159159
if (has_env) {
160-
check_mpi_call(MPI_Abort(com_, error_code), "MPI_Abort");
160+
check_mpi_call(MPI_Abort(comm_, error_code), "MPI_Abort");
161161
} else {
162162
std::abort();
163163
}
164164
}
165165

166166
#ifdef BOOST_MPI_HPP
167167
// Conversion to and from boost communicator, Keep for backward compatibility
168-
inline operator boost::mpi::communicator() const { return boost::mpi::communicator(_com, boost::mpi::comm_duplicate); }
169-
inline communicator(boost::mpi::communicator c) : _com(c) {}
168+
inline operator boost::mpi::communicator() const { return boost::mpi::communicator(comm_, boost::mpi::comm_duplicate); }
169+
inline communicator(boost::mpi::communicator c) : comm_(c) {}
170170
#endif // BOOST_MPI_HPP
171171

172172
/**
@@ -188,11 +188,11 @@ namespace mpi {
188188
void barrier(long poll_msec = 1) const {
189189
if (has_env) {
190190
if (poll_msec == 0) {
191-
check_mpi_call(MPI_Barrier(com_), "MPI_Barrier");
191+
check_mpi_call(MPI_Barrier(comm_), "MPI_Barrier");
192192
} else {
193193
MPI_Request req{};
194194
int flag = 0;
195-
check_mpi_call(MPI_Ibarrier(com_, &req), "MPI_Ibarrier");
195+
check_mpi_call(MPI_Ibarrier(comm_, &req), "MPI_Ibarrier");
196196
while (!flag) {
197197
check_mpi_call(MPI_Test(&req, &flag, MPI_STATUS_IGNORE), "MPI_Test");
198198
usleep(poll_msec * 1000);
@@ -202,7 +202,7 @@ namespace mpi {
202202
}
203203

204204
private:
205-
MPI_Comm com_ = MPI_COMM_WORLD;
205+
MPI_Comm comm_ = MPI_COMM_WORLD;
206206
};
207207

208208
/**
@@ -224,7 +224,7 @@ namespace mpi {
224224

225225
[[nodiscard]] inline shared_communicator communicator::split_shared(int split_type, int key) const {
226226
shared_communicator c{};
227-
if (has_env) check_mpi_call(MPI_Comm_split_type(com_, split_type, key, MPI_INFO_NULL, &c.com_), "MPI_Comm_split_type");
227+
if (has_env) check_mpi_call(MPI_Comm_split_type(comm_, split_type, key, MPI_INFO_NULL, &c.comm_), "MPI_Comm_split_type");
228228
return c;
229229
}
230230

c++/mpi/group.hpp

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -36,8 +36,10 @@ namespace mpi {
3636
* @ingroup mpi_essentials
3737
* @brief C++ wrapper around `MPI_Group` providing various convenience functions.
3838
*
39-
* @details It stores an `MPI_Group` object as its only member which by default is set to `MPI_GROUP_NULL`. The
40-
* underlying `MPI_Group` object is automatically freed when a group object goes out of scope.
39+
* @details It stores an `MPI_Group` object as its only member which by default is set to `MPI_GROUP_NULL`.
40+
* The underlying `MPI_Group` object is automatically freed when a group object goes out of scope.
41+
*
42+
* This class follows move-only semantics and takes ownership of the wrapped `MPI_Group` object.
4143
*
4244
* All functions that make direct calls to the MPI C library throw an exception in case the call fails.
4345
*/
@@ -98,8 +100,8 @@ namespace mpi {
98100
}
99101

100102
/**
101-
* @brief Get the size of the communicator.
102-
* @return The result of `MPI_Comm_size` if mpi::has_env is true, otherwise 1.
103+
* @brief Get the size of the group.
104+
* @return The result of `MPI_Group_size` if mpi::has_env is true, otherwise 1.
103105
*/
104106
[[nodiscard]] int size() const {
105107
int s = 1;

c++/mpi/window.hpp

Lines changed: 15 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -41,9 +41,6 @@ namespace mpi {
4141
* @{
4242
*/
4343

44-
// Forward declaration.
45-
template <class BaseType> class shared_window;
46-
4744
/**
4845
* @brief A C++ wrapper around `MPI_Win` providing convenient memory window management.
4946
*
@@ -53,6 +50,8 @@ namespace mpi {
5350
*
5451
* If a base pointer is not specified, the constructor will allocate memory internally.
5552
*
53+
* This class follows move-only semantics and takes ownership of the wrapped `MPI_Win` object.
54+
*
5655
* @tparam BaseType The type of elements stored in the memory window.
5756
*/
5857
template <class BaseType> class window {
@@ -125,9 +124,9 @@ namespace mpi {
125124
if (has_env) {
126125
check_mpi_call(MPI_Win_allocate(size_ * sizeof(BaseType), sizeof(BaseType), info, c.get(), &data_, &win_), "MPI_Win_allocate");
127126
} else {
128-
owned_ = true;
129127
data_ = new BaseType[size_]; // NOLINT (new is fine here)
130128
}
129+
owned_ = true;
131130
}
132131

133132
/// Convert the window to the wrapped `MPI_Win` object.
@@ -143,7 +142,7 @@ namespace mpi {
143142
* @brief Release allocated resources owned by the window.
144143
*
145144
* @details Before freeing the owned memory or the `MPI_Win` handle, a window must have completed all its
146-
* involvement in RMA communications. For that reason we call fence() before `MPI_Win_free`.
145+
* involvement in RMA communications. For that reason we call `MPI_Win_fence` before `MPI_Win_free`.
147146
*
148147
* The window also must be unlocked if it has been previously locked. However, this cannot be detected and is
149148
* therefore the responsibility of the user.
@@ -154,7 +153,7 @@ namespace mpi {
154153
void free() noexcept {
155154
if (has_env) {
156155
if (win_ != MPI_WIN_NULL) {
157-
fence();
156+
MPI_Win_fence(0, win_);
158157
MPI_Win_free(&win_);
159158
}
160159
} else if (owned_) {
@@ -338,14 +337,14 @@ namespace mpi {
338337
/// Get a pointer to the beginning of the window memory.
339338
[[nodiscard]] BaseType *base() const { return data_; }
340339

341-
/// Get the size of the window in bytes.
342-
[[nodiscard]] MPI_Aint size() const { return size_ * sizeof(BaseType); }
340+
/// Get the size of the window in number of elements.
341+
[[nodiscard]] MPI_Aint size() const { return size_; }
343342

344343
/// Get the displacement unit in bytes.
345344
[[nodiscard]] int disp_unit() const { return sizeof(BaseType); }
346345

347346
/// Get the mpi::communicator associated with the window.
348-
[[nodiscard]] communicator get_communicator() const { return comm_.get(); }
347+
[[nodiscard]] communicator get_communicator() const { return comm_; }
349348

350349
protected:
351350
MPI_Win win_{MPI_WIN_NULL};
@@ -364,7 +363,7 @@ namespace mpi {
364363
*/
365364
template <class BaseType> class shared_window : public window<BaseType> {
366365
public:
367-
///Construct a shared memory window with `MPI_WIN_NULL`.
366+
/// Construct a shared memory window with `MPI_WIN_NULL`.
368367
shared_window() = default;
369368

370369
/**
@@ -384,19 +383,22 @@ namespace mpi {
384383
if (has_env) {
385384
check_mpi_call(MPI_Win_allocate_shared(size_ * sizeof(BaseType), sizeof(BaseType), info, c.get(), &data_, &win_), "MPI_Win_allocate_shared");
386385
} else {
387-
owned_ = true;
388386
data_ = new BaseType[size_]; // NOLINT (new is fine here)
389387
}
388+
owned_ = true;
390389
}
391390

392391
/**
393392
* @brief Query attributes of a shared memory window.
394393
*
395-
* @details Retrieves the size, displacement unit, and a pointer to the beginning of the shared memory region for a
394+
* @details Retrieves the byte-size, displacement unit, and a pointer to the beginning of the shared memory region for a
396395
* specific rank.
397396
*
397+
* When `MPI_PROC_NULL` is passed for the rank, MPI returns information about the memory segment with the lowest rank that has a
398+
* non-zero size.
399+
*
398400
* @param rank Rank within the shared communicator.
399-
* @return A tuple containing the size in bytes, the displacement unit in bytes and the base pointer.
401+
* @return A tuple containing the byte-size, the displacement unit in bytes and the base pointer.
400402
*/
401403
[[nodiscard]] std::tuple<MPI_Aint, int, void *> query(int rank = MPI_PROC_NULL) const {
402404
if (has_env) {

0 commit comments

Comments (0)