Merge branch 'main' into bb/stable-subcell-tests
bennibolm committed Sep 5, 2024
2 parents 36ee2c6 + f9c9107 commit 9020783
Showing 11 changed files with 107 additions and 79 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/SpellCheck.yml
@@ -10,4 +10,4 @@ jobs:
- name: Checkout Actions Repository
uses: actions/checkout@v4
- name: Check spelling
uses: crate-ci/typos@v1.23.6
uses: crate-ci/typos@v1.24.3
10 changes: 6 additions & 4 deletions Project.toml
@@ -29,6 +29,7 @@ PrecompileTools = "aea7be01-6a6a-4083-8856-8a6e6704d82a"
Preferences = "21216c6a-2e73-6563-6e65-726566657250"
Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
RecipesBase = "3cdcf5f2-1ef4-517c-9805-6587b60abb01"
RecursiveArrayTools = "731186ca-8d62-57ce-b412-fbd966d074cd"
Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
Requires = "ae029012-a4dd-5104-9daa-d747884805df"
SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462"
@@ -50,13 +51,13 @@ TrixiBase = "9a0f1c46-06d5-4909-a5a3-ce25d3fa3284"
UUIDs = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"

[weakdeps]
Makie = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a"
Convex = "f65535da-76fb-5f13-bab9-19810c17039a"
ECOS = "e2685f51-7e38-5353-a97d-a921fd2c8199"
Makie = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a"

[extensions]
TrixiMakieExt = "Makie"
TrixiConvexECOSExt = ["Convex", "ECOS"]
TrixiMakieExt = "Makie"

[compat]
CodeTracking = "1.0.5"
@@ -87,12 +88,13 @@ PrecompileTools = "1.1"
Preferences = "1.3"
Printf = "1"
RecipesBase = "1.1"
RecursiveArrayTools = "2.38.10"
Reexport = "1.0"
Requires = "1.1"
SciMLBase = "1.90, 2"
SimpleUnPack = "1.1"
SparseArrays = "1"
StartUpDG = "0.17.7"
StartUpDG = "0.17.7, 1.1.5"
Static = "0.8.7"
StaticArrayInterface = "1.4"
StaticArrays = "1.5"
@@ -109,6 +111,6 @@ UUIDs = "1.6"
julia = "1.8"

[extras]
Makie = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a"
Convex = "f65535da-76fb-5f13-bab9-19810c17039a"
ECOS = "e2685f51-7e38-5353-a97d-a921fd2c8199"
Makie = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a"
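
For context, the `[weakdeps]` and `[extensions]` sections above use Julia's package extension mechanism (Julia 1.9 and newer): an extension module such as `TrixiConvexECOSExt` is compiled and loaded only once all of its weak dependencies are present in the active environment. A minimal usage sketch, assuming Convex and ECOS are installed alongside Trixi:

using Trixi          # Trixi alone does not load the extension
using Convex, ECOS   # loading both weak dependencies triggers TrixiConvexECOSExt

The plotting extension `TrixiMakieExt` is activated the same way by loading Makie. Listing the same packages under `[extras]` is a common companion pattern for compatibility with older Julia versions and tooling.
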
6 changes: 5 additions & 1 deletion examples/tree_2d_dgsem/elixir_euler_vortex_amr.jl
@@ -27,7 +27,6 @@ function (indicator_vortex::IndicatorVortex)(u::AbstractArray{<:Any, 4},
t, kwargs...)
mesh = indicator_vortex.cache.mesh
alpha = indicator_vortex.cache.alpha
indicator_threaded = indicator_vortex.cache.indicator_threaded
resize!(alpha, nelements(dg, cache))

# get analytical vortex center (based on assumption that center=[0.0,0.0]
@@ -57,6 +56,11 @@ function periodic_distance_2d(coordinates, center, domain_length)
return sqrt(sum(abs2, dx_periodic))
end

# Optional: Nicer display of the indicator
function Base.show(io::IO, ::MIME"text/plain", indicator::IndicatorVortex)
Trixi.summary_box(io, "IndicatorVortex")
end

end # module TrixiExtension

import .TrixiExtension
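
The `Base.show` method added above ties the custom indicator into Julia's display system via the three-argument `show(io, ::MIME"text/plain", x)` form, which the REPL uses for standalone display. A self-contained sketch of the same pattern, using a hypothetical `MyIndicator` type and plain printing in place of `Trixi.summary_box`:

# Hypothetical type, for illustration only
struct MyIndicator
    amplitude::Float64
end

# Called whenever the value is displayed on its own (REPL, `display`, ...)
function Base.show(io::IO, ::MIME"text/plain", indicator::MyIndicator)
    println(io, "MyIndicator")
    print(io, "  amplitude: ", indicator.amplitude)
end

display(MyIndicator(1.0))   # rendered by the method above
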
2 changes: 2 additions & 0 deletions src/callbacks_step/amr.jl
@@ -243,6 +243,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh,
@unpack to_refine, to_coarsen = amr_callback.amr_cache
empty!(to_refine)
empty!(to_coarsen)
# Note: This assumes that the entries of `lambda` are sorted with ascending cell ids
for element in eachindex(lambda)
controller_value = lambda[element]
if controller_value > 0
@@ -395,6 +396,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh,
@unpack to_refine, to_coarsen = amr_callback.amr_cache
empty!(to_refine)
empty!(to_coarsen)
# Note: This assumes that the entries of `lambda` are sorted with ascending cell ids
for element in eachindex(lambda)
controller_value = lambda[element]
if controller_value > 0
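
The comments added above document an ordering assumption: the loop pushes candidates into `to_refine`/`to_coarsen` in the order of `eachindex(lambda)`, so the resulting lists follow ascending cell ids only if `lambda` itself is ordered that way. A simplified, standalone sketch of that classification step (made-up controller values; the real callback records the corresponding cell ids, which is why the ordering assumption matters):

lambda = [1, 0, -1, 1]   # made-up controller value per element, assumed ordered by cell id
to_refine = Int[]
to_coarsen = Int[]
for element in eachindex(lambda)
    controller_value = lambda[element]
    if controller_value > 0
        push!(to_refine, element)    # positive value: marked for refinement
    elseif controller_value < 0
        push!(to_coarsen, element)   # negative value: marked for coarsening
    end
end
# Result: to_refine == [1, 4], to_coarsen == [3]
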
2 changes: 1 addition & 1 deletion src/meshes/parallel_tree.jl
@@ -23,7 +23,7 @@
#
# An exception to the 2:1 rule exists for the low-level `refine_unbalanced!`
# function, which is required for implementing level-wise refinement in a sane
# way. Also, depth-first ordering *might* not by guaranteed during
# way. Also, depth-first ordering *might* not be guaranteed during
# refinement/coarsening operations.
mutable struct ParallelTree{NDIMS} <: AbstractTree{NDIMS}
parent_ids::Vector{Int}
30 changes: 17 additions & 13 deletions src/meshes/parallel_tree_mesh.jl
@@ -20,8 +20,8 @@ function partition!(mesh::ParallelTreeMesh; allow_coarsening = true)
n_leaves_per_rank = OffsetArray(fill(div(length(leaves), mpi_nranks()),
mpi_nranks()),
0:(mpi_nranks() - 1))
for d in 0:(rem(length(leaves), mpi_nranks()) - 1)
n_leaves_per_rank[d] += 1
for rank in 0:(rem(length(leaves), mpi_nranks()) - 1)
n_leaves_per_rank[rank] += 1
end
@assert sum(n_leaves_per_rank) == length(leaves)

@@ -31,17 +31,20 @@ function partition!(mesh::ParallelTreeMesh; allow_coarsening = true)
mesh.n_cells_by_rank = similar(n_leaves_per_rank)

leaf_count = 0
# Assign first cell to rank 0 (employ depth-first indexing of cells)
mesh.first_cell_by_rank[0] = 1
# Iterate over all ranks
for d in 0:(mpi_nranks() - 1)
leaf_count += n_leaves_per_rank[d]
for rank in 0:(mpi_nranks() - 1)
leaf_count += n_leaves_per_rank[rank]
last_id = leaves[leaf_count]
parent_id = mesh.tree.parent_ids[last_id]

# Check if all children of the last parent are leaves
# If coarsening is allowed, we need to make sure that parents of leaves
# are on the same rank as the leaves when coarsened.
if allow_coarsening &&
# Check if all children of the last parent are leaves
all(id -> is_leaf(mesh.tree, id), @view mesh.tree.child_ids[:, parent_id]) &&
d < length(n_leaves_per_rank) - 1
rank < length(n_leaves_per_rank) - 1 # Make sure there is another rank

# To keep children of parent together if they are all leaves,
# all children are added to this rank
@@ -53,20 +56,21 @@
additional_cells)
leaf_count += additional_leaves
# Add leaves to this rank, remove from next rank
n_leaves_per_rank[d] += additional_leaves
n_leaves_per_rank[d + 1] -= additional_leaves
n_leaves_per_rank[rank] += additional_leaves
n_leaves_per_rank[rank + 1] -= additional_leaves
end
end

@assert all(n -> n > 0, n_leaves_per_rank) "Too many ranks to properly partition the mesh!"

mesh.n_cells_by_rank[d] = last_id - mesh.first_cell_by_rank[d] + 1
mesh.tree.mpi_ranks[mesh.first_cell_by_rank[d]:last_id] .= d
mesh.n_cells_by_rank[rank] = last_id - mesh.first_cell_by_rank[rank] + 1
# Use depth-first indexing of cells again to also assign non-leaf cells
mesh.tree.mpi_ranks[mesh.first_cell_by_rank[rank]:last_id] .= rank

# Set first cell of next rank
if d < length(n_leaves_per_rank) - 1
mesh.first_cell_by_rank[d + 1] = mesh.first_cell_by_rank[d] +
mesh.n_cells_by_rank[d]
if rank < length(n_leaves_per_rank) - 1 # Make sure there is another rank
mesh.first_cell_by_rank[rank + 1] = mesh.first_cell_by_rank[rank] +
mesh.n_cells_by_rank[rank]
end
end

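The renamed loop variable (`d` → `rank`) and the added comments clarify the partitioning scheme: every MPI rank receives `div(n_leaves, n_ranks)` leaf cells, the first `rem(n_leaves, n_ranks)` ranks get one extra leaf, and cells are then assigned contiguously following the depth-first cell ordering. A standalone sketch of just the distribution step (hypothetical sizes, no MPI):

using OffsetArrays: OffsetArray

n_leaves = 10   # hypothetical example sizes
n_ranks = 4

# Zero-based indexing mirrors MPI rank numbering
n_leaves_per_rank = OffsetArray(fill(div(n_leaves, n_ranks), n_ranks), 0:(n_ranks - 1))
for rank in 0:(rem(n_leaves, n_ranks) - 1)
    n_leaves_per_rank[rank] += 1   # hand the remainder to the first ranks
end

@assert sum(n_leaves_per_rank) == n_leaves
# n_leaves_per_rank over ranks 0..3: 3, 3, 2, 2
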
65 changes: 34 additions & 31 deletions src/solvers/dgsem_p4est/dg_parallel.jl
@@ -49,10 +49,10 @@ function start_mpi_send!(mpi_cache::P4estMPICache, mesh, equations, dg, cache)
data_size = nvariables(equations) * nnodes(dg)^(ndims(mesh) - 1)
n_small_elements = 2^(ndims(mesh) - 1)

for d in 1:length(mpi_cache.mpi_neighbor_ranks)
send_buffer = mpi_cache.mpi_send_buffers[d]
for rank in 1:length(mpi_cache.mpi_neighbor_ranks)
send_buffer = mpi_cache.mpi_send_buffers[rank]

for (index, interface) in enumerate(mpi_cache.mpi_neighbor_interfaces[d])
for (index, interface) in enumerate(mpi_cache.mpi_neighbor_interfaces[rank])
first = (index - 1) * data_size + 1
last = (index - 1) * data_size + data_size
local_side = cache.mpi_interfaces.local_sides[interface]
@@ -62,14 +62,15 @@ function start_mpi_send!(mpi_cache::P4estMPICache, mesh, equations, dg, cache)

# Set send_buffer corresponding to mortar data to NaN and overwrite the parts where local
# data exists
interfaces_data_size = length(mpi_cache.mpi_neighbor_interfaces[d]) * data_size
mortars_data_size = length(mpi_cache.mpi_neighbor_mortars[d]) *
interfaces_data_size = length(mpi_cache.mpi_neighbor_interfaces[rank]) *
data_size
mortars_data_size = length(mpi_cache.mpi_neighbor_mortars[rank]) *
n_small_elements * 2 * data_size
# `NaN |> eltype(...)` ensures that the NaNs are of the appropriate floating point type
send_buffer[(interfaces_data_size + 1):(interfaces_data_size + mortars_data_size)] .= NaN |>
eltype(mpi_cache)

for (index, mortar) in enumerate(mpi_cache.mpi_neighbor_mortars[d])
for (index, mortar) in enumerate(mpi_cache.mpi_neighbor_mortars[rank])
index_base = interfaces_data_size +
(index - 1) * n_small_elements * 2 * data_size
indices = buffer_mortar_indices(mesh, index_base, data_size)
@@ -91,18 +92,18 @@ function start_mpi_send!(mpi_cache::P4estMPICache, mesh, equations, dg, cache)
end

# Start sending
for (index, d) in enumerate(mpi_cache.mpi_neighbor_ranks)
for (index, rank) in enumerate(mpi_cache.mpi_neighbor_ranks)
mpi_cache.mpi_send_requests[index] = MPI.Isend(mpi_cache.mpi_send_buffers[index],
d, mpi_rank(), mpi_comm())
rank, mpi_rank(), mpi_comm())
end

return nothing
end

function start_mpi_receive!(mpi_cache::P4estMPICache)
for (index, d) in enumerate(mpi_cache.mpi_neighbor_ranks)
for (index, rank) in enumerate(mpi_cache.mpi_neighbor_ranks)
mpi_cache.mpi_recv_requests[index] = MPI.Irecv!(mpi_cache.mpi_recv_buffers[index],
d, d, mpi_comm())
rank, rank, mpi_comm())
end

return nothing
@@ -118,11 +119,11 @@ function finish_mpi_receive!(mpi_cache::P4estMPICache, mesh, equations, dg, cache)
n_positions = n_small_elements + 1

# Start receiving and unpack received data until all communication is finished
d = MPI.Waitany(mpi_cache.mpi_recv_requests)
while d !== nothing
recv_buffer = mpi_cache.mpi_recv_buffers[d]
data = MPI.Waitany(mpi_cache.mpi_recv_requests)
while data !== nothing
recv_buffer = mpi_cache.mpi_recv_buffers[data]

for (index, interface) in enumerate(mpi_cache.mpi_neighbor_interfaces[d])
for (index, interface) in enumerate(mpi_cache.mpi_neighbor_interfaces[data])
first = (index - 1) * data_size + 1
last = (index - 1) * data_size + data_size

@@ -133,8 +134,9 @@ function finish_mpi_receive!(mpi_cache::P4estMPICache, mesh, equations, dg, cache)
end
end

interfaces_data_size = length(mpi_cache.mpi_neighbor_interfaces[d]) * data_size
for (index, mortar) in enumerate(mpi_cache.mpi_neighbor_mortars[d])
interfaces_data_size = length(mpi_cache.mpi_neighbor_interfaces[data]) *
data_size
for (index, mortar) in enumerate(mpi_cache.mpi_neighbor_mortars[data])
index_base = interfaces_data_size +
(index - 1) * n_small_elements * 2 * data_size
indices = buffer_mortar_indices(mesh, index_base, data_size)
@@ -155,7 +157,7 @@ function finish_mpi_receive!(mpi_cache::P4estMPICache, mesh, equations, dg, cache)
end
end

d = MPI.Waitany(mpi_cache.mpi_recv_requests)
data = MPI.Waitany(mpi_cache.mpi_recv_requests)
end

return nothing
@@ -311,10 +313,10 @@ function init_mpi_neighbor_connectivity(mpi_interfaces, mpi_mortars,
# For each neighbor rank, init connectivity data structures
mpi_neighbor_interfaces = Vector{Vector{Int}}(undef, length(mpi_neighbor_ranks))
mpi_neighbor_mortars = Vector{Vector{Int}}(undef, length(mpi_neighbor_ranks))
for (index, d) in enumerate(mpi_neighbor_ranks)
mpi_neighbor_interfaces[index] = interface_ids[findall(==(d),
for (index, rank) in enumerate(mpi_neighbor_ranks)
mpi_neighbor_interfaces[index] = interface_ids[findall(==(rank),
neighbor_ranks_interface)]
mpi_neighbor_mortars[index] = mortar_ids[findall(x -> (d in x),
mpi_neighbor_mortars[index] = mortar_ids[findall(x -> (rank in x),
neighbor_ranks_mortar)]
end

@@ -519,10 +521,10 @@ function exchange_normal_directions!(mpi_mortars, mpi_cache,
recv_requests = Vector{MPI.Request}(undef, length(mpi_neighbor_mortars))

# Fill send buffers
for d in 1:length(mpi_neighbor_ranks)
send_buffer = send_buffers[d]
for rank in 1:length(mpi_neighbor_ranks)
send_buffer = send_buffers[rank]

for (index, mortar) in enumerate(mpi_neighbor_mortars[d])
for (index, mortar) in enumerate(mpi_neighbor_mortars[rank])
index_base = (index - 1) * n_small_elements * data_size
indices = buffer_mortar_indices(mesh, index_base, data_size)
for position in mpi_mortars.local_neighbor_positions[mortar]
@@ -538,17 +540,18 @@ function exchange_normal_directions!(mpi_mortars, mpi_cache,
end

# Start data exchange
for (index, d) in enumerate(mpi_neighbor_ranks)
send_requests[index] = MPI.Isend(send_buffers[index], d, mpi_rank(), mpi_comm())
recv_requests[index] = MPI.Irecv!(recv_buffers[index], d, d, mpi_comm())
for (index, rank) in enumerate(mpi_neighbor_ranks)
send_requests[index] = MPI.Isend(send_buffers[index], rank, mpi_rank(),
mpi_comm())
recv_requests[index] = MPI.Irecv!(recv_buffers[index], rank, rank, mpi_comm())
end

# Unpack data from receive buffers
d = MPI.Waitany(recv_requests)
while d !== nothing
recv_buffer = recv_buffers[d]
data = MPI.Waitany(recv_requests)
while data !== nothing
recv_buffer = recv_buffers[data]

for (index, mortar) in enumerate(mpi_neighbor_mortars[d])
for (index, mortar) in enumerate(mpi_neighbor_mortars[data])
index_base = (index - 1) * n_small_elements * data_size
indices = buffer_mortar_indices(mesh, index_base, data_size)
for position in 1:n_small_elements
@@ -563,7 +566,7 @@ function exchange_normal_directions!(mpi_mortars, mpi_cache,
end
end

d = MPI.Waitany(recv_requests)
data = MPI.Waitany(recv_requests)
end

# Wait for communication to finish
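
Across this file the loop variable `d` is renamed to `rank` where it denotes an MPI rank and to `data` where it holds the index returned by `MPI.Waitany`, which makes the communication pattern easier to read: sends are tagged with the sender's rank, the matching receives are posted with the neighbor's rank as both source and tag, and completed receives are unpacked one by one until `MPI.Waitany` returns `nothing`. A toy, self-contained sketch of that pattern with MPI.jl (two ranks, plain Float64 buffers rather than Trixi's cache layout; the file name is arbitrary), run e.g. via `mpiexecjl -n 2 julia sketch.jl`:

using MPI

function main()
    MPI.Init()
    comm = MPI.COMM_WORLD
    rank = MPI.Comm_rank(comm)
    other = 1 - rank                     # assumes exactly 2 ranks

    send_buffer = fill(Float64(rank), 4)
    recv_buffer = similar(send_buffer)

    # Tag convention as in the code above: a message is tagged with the sender's rank,
    # so the matching receive is posted with (source = other, tag = other)
    recv_requests = [MPI.Irecv!(recv_buffer, other, other, comm)]
    send_request = MPI.Isend(send_buffer, other, rank, comm)

    # Unpack completed receives until Waitany reports that no active requests remain
    index = MPI.Waitany(recv_requests)
    while index !== nothing
        println("rank $rank received ", recv_buffer, " from rank $other")
        index = MPI.Waitany(recv_requests)
    end

    MPI.Wait(send_request)
    MPI.Finalize()
end

main()
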
6 changes: 3 additions & 3 deletions src/solvers/dgsem_t8code/dg_parallel.jl
@@ -119,10 +119,10 @@ function init_mpi_neighbor_connectivity(mpi_mesh_info, mesh::ParallelT8codeMesh)
# For each neighbor rank, init connectivity data structures
mpi_neighbor_interfaces = Vector{Vector{Int}}(undef, length(mpi_neighbor_ranks))
mpi_neighbor_mortars = Vector{Vector{Int}}(undef, length(mpi_neighbor_ranks))
for (index, d) in enumerate(mpi_neighbor_ranks)
mpi_neighbor_interfaces[index] = interface_ids[findall(==(d),
for (index, rank) in enumerate(mpi_neighbor_ranks)
mpi_neighbor_interfaces[index] = interface_ids[findall(==(rank),
neighbor_ranks_interface)]
mpi_neighbor_mortars[index] = mortar_ids[findall(x -> (d in x),
mpi_neighbor_mortars[index] = mortar_ids[findall(x -> (rank in x),
neighbor_ranks_mortar)]
end

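The same `d` → `rank` rename is applied to the t8code connectivity setup, which groups interface and mortar ids by neighboring rank using `findall`. A toy illustration of that grouping with made-up ids (no MPI involved):

# Made-up example data, for illustration only
mpi_neighbor_ranks = [0, 2]                  # ranks this process shares boundaries with
interface_ids = [11, 12, 13, 14]
neighbor_ranks_interface = [2, 0, 2, 2]      # remote rank owning each interface
mortar_ids = [21, 22]
neighbor_ranks_mortar = [[0, 2], [2]]        # a mortar can touch several remote ranks

mpi_neighbor_interfaces = Vector{Vector{Int}}(undef, length(mpi_neighbor_ranks))
mpi_neighbor_mortars = Vector{Vector{Int}}(undef, length(mpi_neighbor_ranks))
for (index, rank) in enumerate(mpi_neighbor_ranks)
    mpi_neighbor_interfaces[index] = interface_ids[findall(==(rank), neighbor_ranks_interface)]
    mpi_neighbor_mortars[index] = mortar_ids[findall(x -> (rank in x), neighbor_ranks_mortar)]
end

# mpi_neighbor_interfaces == [[12], [11, 13, 14]]
# mpi_neighbor_mortars    == [[21], [21, 22]]
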
6 changes: 6 additions & 0 deletions src/solvers/dgsem_tree/containers_2d.jl
@@ -767,6 +767,7 @@ end
# Container data structure (structure-of-arrays style) for DG MPI interfaces
mutable struct MPIInterfaceContainer2D{uEltype <: Real} <: AbstractContainer
u::Array{uEltype, 4} # [leftright, variables, i, interfaces]
# Note: `local_neighbor_ids` stores the MPI-local neighbors, but with globally valid index!
local_neighbor_ids::Vector{Int} # [interfaces]
orientations::Vector{Int} # [interfaces]
remote_sides::Vector{Int} # [interfaces]
@@ -907,6 +908,8 @@ function init_mpi_interfaces!(mpi_interfaces, elements, mesh::TreeMesh2D)

# Create interface between elements
count += 1
# Note: `local_neighbor_ids` stores the MPI-local neighbors,
# but with globally valid index!
mpi_interfaces.local_neighbor_ids[count] = element

if iseven(direction) # element is "left" of interface, remote cell is "right" of interface
@@ -941,6 +944,7 @@ end
mutable struct MPIL2MortarContainer2D{uEltype <: Real} <: AbstractContainer
u_upper::Array{uEltype, 4} # [leftright, variables, i, mortars]
u_lower::Array{uEltype, 4} # [leftright, variables, i, mortars]
# Note: `local_neighbor_ids` stores the MPI-local neighbors, but with globally valid index!
local_neighbor_ids::Vector{Vector{Int}} # [mortars][ids]
local_neighbor_positions::Vector{Vector{Int}} # [mortars][positions]
# Large sides: left -> 1, right -> 2
@@ -1214,6 +1218,8 @@ function init_mpi_mortars!(mpi_mortars, elements, mesh::TreeMesh2D)
# 3 -> large element
count += 1

# Note: `local_neighbor_ids` stores the MPI-local neighbors,
# but with globally valid index!
local_neighbor_ids = Vector{Int}()
local_neighbor_positions = Vector{Int}()
if is_own_cell(mesh.tree, lower_cell_id)
(2 more changed files not shown)
