
Commit 588eb95
Use full form for lambdas to avoid confusion with IIFEs.
LTLA committed Feb 5, 2025
1 parent fe4061d commit 588eb95
Showing 2 changed files with 84 additions and 40 deletions.
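A note on the motivation: in this codebase, [&]{ ... }() is an immediately invoked
function expression (IIFE) whose result is used on the spot, while the same terse
syntax passed to another function is a callback that runs later. When the closing
}); sits many lines below the opening [&], the two are easy to confuse at a glance,
hence the switch to the full form with an explicit parameter list and -> void return
type for callbacks. A minimal sketch of the distinction (run_later() and the values
are illustrative, not from this repository):

#include <iostream>

template<class Fun_>
void run_later(Fun_ fun) { // invokes the callback; a real scheduler would defer it
    fun();
}

int main() {
    // IIFE: the trailing '()' invokes the lambda immediately.
    int x = [&]{ return 42; }();

    // Callback: the lambda only runs inside run_later(). With the full form,
    // the '() -> void' on the first line makes clear that this is a deferred
    // callback rather than an IIFE.
    run_later([&]() -> void {
        std::cout << x << "\n";
    });

    return 0;
}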
64 changes: 44 additions & 20 deletions include/tatami_tiledb/DenseMatrix.hpp
@@ -157,18 +157,30 @@ class MyopicCore {
 public:
     template<typename Value_>
     const Value_* fetch_block(Index_ i, Index_ block_start, Value_* buffer) {
-        return fetch_raw(i, buffer, [&](tiledb::Subarray& subarray, int rowdex) {
-            my_tdb_non_target_dim.add_range(subarray, rowdex, block_start, my_non_target_length);
-        });
+        return fetch_raw(
+            i,
+            buffer,
+            [&](tiledb::Subarray& subarray, int rowdex) -> void {
+                my_tdb_non_target_dim.add_range(subarray, rowdex, block_start, my_non_target_length);
+            }
+        );
     }

     template<typename Value_>
     const Value_* fetch_indices(Index_ i, const std::vector<Index_>& indices, Value_* buffer) {
-        return fetch_raw(i, buffer, [&](tiledb::Subarray& subarray, int rowdex) {
-            tatami::process_consecutive_indices<Index_>(indices.data(), indices.size(), [&](Index_ s, Index_ l) {
-                my_tdb_non_target_dim.add_range(subarray, rowdex, s, l);
-            });
-        });
+        return fetch_raw(
+            i,
+            buffer,
+            [&](tiledb::Subarray& subarray, int rowdex) -> void {
+                tatami::process_consecutive_indices<Index_>(
+                    indices.data(),
+                    indices.size(),
+                    [&](Index_ s, Index_ l) -> void {
+                        my_tdb_non_target_dim.add_range(subarray, rowdex, s, l);
+                    }
+                );
+            }
+        );
     }
 };

@@ -242,7 +254,7 @@ class OracularCore {
                 my_offset += my_slab_size;
                 return output;
             },
-            /* populate = */ [&](std::vector<std::pair<Index_, Slab*> >& to_populate, std::vector<std::pair<Index_, Slab*> >& to_reuse) {
+            /* populate = */ [&](std::vector<std::pair<Index_, Slab*> >& to_populate, std::vector<std::pair<Index_, Slab*> >& to_reuse) -> void {
                 // Defragmenting the existing chunks. We sort by offset to make
                 // sure that we're not clobbering in-use slabs during the copy().
                 sort_by_field(to_reuse, [](const std::pair<Index_, Slab*>& x) -> size_t { return x.second->offset; });
@@ -314,18 +326,30 @@ class OracularCore {
 public:
     template<typename Value_>
     const Value_* fetch_block(Index_ i, Index_ block_start, Value_* buffer) {
-        return fetch_raw(i, buffer, [&](tiledb::Subarray& subarray, int rowdex) {
-            my_tdb_non_target_dim.add_range(subarray, rowdex, block_start, my_non_target_length);
-        });
+        return fetch_raw(
+            i,
+            buffer,
+            [&](tiledb::Subarray& subarray, int rowdex) -> void {
+                my_tdb_non_target_dim.add_range(subarray, rowdex, block_start, my_non_target_length);
+            }
+        );
     }

     template<typename Value_>
     const Value_* fetch_indices(Index_ i, const std::vector<Index_>& indices, Value_* buffer) {
-        return fetch_raw(i, buffer, [&](tiledb::Subarray& subarray, int rowdex) {
-            tatami::process_consecutive_indices<Index_>(indices.data(), indices.size(), [&](Index_ s, Index_ l) {
-                my_tdb_non_target_dim.add_range(subarray, rowdex, s, l);
-            });
-        });
+        return fetch_raw(
+            i,
+            buffer,
+            [&](tiledb::Subarray& subarray, int rowdex) -> void {
+                tatami::process_consecutive_indices<Index_>(
+                    indices.data(),
+                    indices.size(),
+                    [&](Index_ s, Index_ l) -> void {
+                        my_tdb_non_target_dim.add_range(subarray, rowdex, s, l);
+                    }
+                );
+            }
+        );
     }
 };

@@ -499,7 +523,7 @@ class DenseMatrix : public tatami::Matrix<Value_, Index_> {
 private:
     template<class PossibleContext_>
     void initialize(const std::string& uri, PossibleContext_ ctx, const DenseMatrixOptions& options) {
-        serialize([&]() {
+        serialize([&]() -> void {
             my_tdb_comp.reset(
                 [&]{
                     // If we have to create our own Context_ object, we do so inside the serialized
@@ -510,9 +534,9 @@ class DenseMatrix : public tatami::Matrix<Value_, Index_> {
                         return new DenseMatrix_internal::Components(uri);
                     }
                 }(),
-                [](DenseMatrix_internal::Components* ptr) {
+                [](DenseMatrix_internal::Components* ptr) -> void {
                     // Serializing the deleter, for completeness's sake.
-                    serialize([&]() {
+                    serialize([&]() -> void {
                         delete ptr;
                     });
                 }
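Note that in the initialize() hunks above, the inner [&]{ ... } still ends with }()
and keeps its terse form: it is a genuine IIFE that must return the new Components
pointer, so it cannot be spelled with -> void. Only the deferred callbacks (the
serialize() functor and the deleter) gain the full form. A standalone sketch of this
reset-with-serialized-deleter pattern, assuming that my_tdb_comp is a
std::shared_ptr and that serialize() runs its functor under a global lock (both are
assumptions for illustration; the real definitions live elsewhere in the repository):

#include <memory>
#include <mutex>

struct Components {}; // stand-in for DenseMatrix_internal::Components

std::mutex global_lock;

template<class Fun_>
void serialize(Fun_ fun) { // assumed: runs fun() under a process-wide lock
    std::lock_guard<std::mutex> guard(global_lock);
    fun();
}

class Holder {
    std::shared_ptr<Components> my_tdb_comp;

public:
    // Assumed to be called once while my_tdb_comp is still empty; otherwise
    // the previous deleter would re-enter serialize() and deadlock here.
    void initialize() {
        serialize([&]() -> void {
            my_tdb_comp.reset(
                [&]{ // IIFE: immediately invoked, so the terse form remains
                    return new Components;
                }(),
                [](Components* ptr) -> void { // deferred deleter callback
                    serialize([&]() -> void {
                        delete ptr;
                    });
                }
            );
        });
    }
};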
60 changes: 40 additions & 20 deletions include/tatami_tiledb/SparseMatrix.hpp
@@ -191,7 +191,7 @@ class MyopicCore {
         Index_ chunk_length = std::min(my_target_dim_extent - chunk_start, my_target_chunk_length);

         size_t num_nonzero = 0;
-        serialize([&]() {
+        serialize([&]() -> void {
             tiledb::Subarray subarray(my_tdb_comp.ctx, my_tdb_comp.array);
             int rowdex = my_row;
             my_tdb_target_dim.add_range(subarray, 1 - rowdex, chunk_start, chunk_length);
@@ -234,17 +234,27 @@ class MyopicCore {

 public:
     std::pair<size_t, size_t> fetch_block(Index_ i, Index_ block_start, Index_ block_length) {
-        return fetch_raw(i, [&](tiledb::Subarray& subarray, int rowdex) {
-            my_tdb_non_target_dim.add_range(subarray, rowdex, block_start, block_length);
-        });
+        return fetch_raw(
+            i,
+            [&](tiledb::Subarray& subarray, int rowdex) -> void {
+                my_tdb_non_target_dim.add_range(subarray, rowdex, block_start, block_length);
+            }
+        );
     }

     std::pair<size_t, size_t> fetch_indices(Index_ i, const std::vector<Index_>& indices) {
-        return fetch_raw(i, [&](tiledb::Subarray& subarray, int rowdex) {
-            tatami::process_consecutive_indices<Index_>(indices.data(), indices.size(), [&](Index_ s, Index_ l) {
-                my_tdb_non_target_dim.add_range(subarray, rowdex, s, l);
-            });
-        });
+        return fetch_raw(
+            i,
+            [&](tiledb::Subarray& subarray, int rowdex) -> void {
+                tatami::process_consecutive_indices<Index_>(
+                    indices.data(),
+                    indices.size(),
+                    [&](Index_ s, Index_ l) -> void {
+                        my_tdb_non_target_dim.add_range(subarray, rowdex, s, l);
+                    }
+                );
+            }
+        );
     }

 public:
@@ -370,7 +380,7 @@ class OracularCore {
             /* create = */ [&]() -> Slab {
                 return Slab();
             },
-            /* populate = */ [&](std::vector<std::pair<Index_, size_t> >& to_populate, std::vector<std::pair<Index_, size_t> >& to_reuse, std::vector<Slab>& all_slabs) {
+            /* populate = */ [&](std::vector<std::pair<Index_, size_t> >& to_populate, std::vector<std::pair<Index_, size_t> >& to_reuse, std::vector<Slab>& all_slabs) -> void {
                 // Defragmenting the existing chunks. We sort by offset to make
                 // sure that we're not clobbering in-use slabs during the copy().
                 sort_by_field(to_reuse, [&](const std::pair<Index_, size_t>& x) -> size_t { return all_slabs[x.second].offset; });
@@ -475,17 +485,27 @@ class OracularCore {

 public:
     std::pair<size_t, size_t> fetch_block(Index_ i, Index_ block_start, Index_ block_length) {
-        return fetch_raw(i, [&](tiledb::Subarray& subarray, int rowdex) {
-            my_tdb_non_target_dim.add_range(subarray, rowdex, block_start, block_length);
-        });
+        return fetch_raw(
+            i,
+            [&](tiledb::Subarray& subarray, int rowdex) -> void {
+                my_tdb_non_target_dim.add_range(subarray, rowdex, block_start, block_length);
+            }
+        );
     }

     std::pair<size_t, size_t> fetch_indices(Index_ i, const std::vector<Index_>& indices) {
-        return fetch_raw(i, [&](tiledb::Subarray& subarray, int rowdex) {
-            tatami::process_consecutive_indices<Index_>(indices.data(), indices.size(), [&](Index_ s, Index_ l) {
-                my_tdb_non_target_dim.add_range(subarray, rowdex, s, l);
-            });
-        });
+        return fetch_raw(
+            i,
+            [&](tiledb::Subarray& subarray, int rowdex) -> void {
+                tatami::process_consecutive_indices<Index_>(
+                    indices.data(),
+                    indices.size(),
+                    [&](Index_ s, Index_ l) -> void {
+                        my_tdb_non_target_dim.add_range(subarray, rowdex, s, l);
+                    }
+                );
+            }
+        );
     }

 public:
@@ -929,7 +949,7 @@ class SparseMatrix : public tatami::Matrix<Value_, Index_> {
 private:
     template<class PossibleContext_>
     void initialize(const std::string& uri, PossibleContext_ ctx, const SparseMatrixOptions& options) {
-        serialize([&]() {
+        serialize([&]() -> void {
             my_tdb_comp.reset(
                 [&]{
                     // If we have to create our own Context_ object, we do so inside the serialized
@@ -942,7 +962,7 @@ class SparseMatrix : public tatami::Matrix<Value_, Index_> {
                 }(),
                 [](SparseMatrix_internal::Components* ptr) {
                     // Serializing the deleter, for completeness's sake.
-                    serialize([&]() {
+                    serialize([&]() -> void {
                         delete ptr;
                     });
                 }
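One more point of reference for the fetch_indices() callbacks in both files:
tatami::process_consecutive_indices takes a pointer to sorted indices plus their
count, and invokes its functor once per contiguous run with a (start, length) pair,
so that each run becomes a single add_range() call on the subarray. A sketch of that
run-collapsing behavior, as inferred from its usage in this diff (this is not
tatami's actual implementation):

#include <cstddef>
#include <iostream>
#include <vector>

template<typename Index_, class Fun_>
void process_consecutive_indices_sketch(const Index_* indices, std::size_t number, Fun_ fun) {
    std::size_t i = 0;
    while (i < number) {
        std::size_t j = i + 1;
        // Extend the run while the next index is exactly one past the previous.
        while (j < number && indices[j] == indices[j - 1] + 1) {
            ++j;
        }
        fun(indices[i], static_cast<Index_>(j - i)); // (start, length) of this run
        i = j;
    }
}

int main() {
    std::vector<int> indices{3, 4, 5, 9, 12, 13};
    process_consecutive_indices_sketch<int>(
        indices.data(),
        indices.size(),
        [&](int s, int l) -> void { // full form, matching the commit's style
            std::cout << "add_range(start=" << s << ", length=" << l << ")\n";
        }
    );
    // Prints three runs: (3,3), (9,1) and (12,2).
    return 0;
}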
