
Commit

Merge branch 'main' of github.com:gridap/GridapP4est.jl into facet_integration_non_conforming_meshes
amartinhuertas committed Dec 19, 2023
2 parents 1fb85fe + 6b14086 commit 058f3f1
Showing 5 changed files with 130 additions and 73 deletions.
2 changes: 1 addition & 1 deletion Project.toml
@@ -1,7 +1,7 @@
name = "GridapP4est"
uuid = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9"
authors = ["Alberto F. Martin <[email protected]>"]
version = "0.3.3"
version = "0.3.4"

[deps]
ArgParse = "c7e460c6-2fb9-53a9-8c5b-16f535851c63"
157 changes: 101 additions & 56 deletions src/OctreeDistributedDiscreteModels.jl
@@ -474,25 +474,26 @@ function pXest_partition_given!(::Type{Val{Dc}}, ptr_pXest, new_num_cells_per_pa
if (Dc==2)
p4est_partition_given(ptr_pXest, new_num_cells_per_part)
else
@assert false
p8est_partition_given(ptr_pXest, new_num_cells_per_part)
end
end

function pXest_uniformly_refine!(::Type{Val{Dc}}, ptr_pXest) where Dc
# Refine callback
function refine_fn(::Ptr{p4est_t},
which_tree::p4est_topidx_t,
quadrant::Ptr{p4est_quadrant_t})
# Refine callbacks
function refine_fn_2d(::Ptr{p4est_t},which_tree::p4est_topidx_t,quadrant::Ptr{p4est_quadrant_t})
return Cint(1)
end
function refine_fn_3d(::Ptr{p8est_t},which_tree::p4est_topidx_t,quadrant::Ptr{p8est_quadrant_t})
return Cint(1)
end
# C-callable refine callback
refine_fn_c=@cfunction($refine_fn,
Cint,
(Ptr{p4est_t}, p4est_topidx_t, Ptr{p4est_quadrant_t}))
if (Dc==2)
# C-callable refine callback 2D
refine_fn_c = @cfunction($refine_fn_2d,Cint,(Ptr{p4est_t}, p4est_topidx_t, Ptr{p4est_quadrant_t}))
p4est_refine(ptr_pXest, Cint(0), refine_fn_c, C_NULL)
else
@assert false
# C-callable refine callback 3D
refine_fn_c = @cfunction($refine_fn_3d,Cint,(Ptr{p8est_t}, p4est_topidx_t, Ptr{p8est_quadrant_t}))
p8est_refine(ptr_pXest, Cint(0), refine_fn_c, C_NULL)
end
end
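
The hunk above shows the pattern this commit applies throughout the file: a 2D and a 3D Julia callback are defined side by side, each is wrapped with @cfunction into a C-callable pointer, and a Val{Dc} dispatch selects the p4est (2D) or p8est (3D) entry point. Below is a minimal sketch of that idiom; Quad2D, Quad3D, always_refine_* and refine_all are hypothetical stand-ins, not GridapP4est or P4est_wrapper names.

# Hedged sketch of the Val{Dc} + @cfunction idiom (hypothetical stand-in types).
struct Quad2D end
struct Quad3D end

always_refine_2d(::Ptr{Quad2D})::Cint = Cint(1)   # "refine every cell" callback, 2D
always_refine_3d(::Ptr{Quad3D})::Cint = Cint(1)   # "refine every cell" callback, 3D

function refine_all(::Type{Val{Dc}}) where Dc
  if Dc == 2
    # C-callable pointer a p4est_refine-like 2D routine would receive
    return @cfunction(always_refine_2d, Cint, (Ptr{Quad2D},))
  else
    # C-callable pointer a p8est_refine-like 3D routine would receive
    return @cfunction(always_refine_3d, Cint, (Ptr{Quad3D},))
  end
end

refine_all(Val{2})  # resolves the 2D branch; refine_all(Val{3}) resolves the 3D one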

@@ -505,20 +506,20 @@ function pXest_refine!(::Type{Val{Dc}}, ptr_pXest, refine_fn_c, refine_replace_f
end

function pXest_uniformly_coarsen!(::Type{Val{Dc}}, ptr_pXest) where Dc
# Coarsen callback
function coarsen_fn(::Ptr{p4est_t},
::p4est_topidx_t,
::Ptr{Ptr{p4est_quadrant_t}})
# Coarsen callbacks
function coarsen_fn_2d(::Ptr{p4est_t},::p4est_topidx_t,::Ptr{Ptr{p4est_quadrant_t}})
return Cint(1)
end
function coarsen_fn_3d(::Ptr{p8est_t},::p4est_topidx_t,::Ptr{Ptr{p8est_quadrant_t}})
return Cint(1)
end
  # C-callable coarsen callback
coarsen_fn_c=@cfunction($coarsen_fn,
Cint,
(Ptr{p4est_t}, p4est_topidx_t, Ptr{Ptr{p4est_quadrant_t}}))
if (Dc==2)
    # C-callable coarsen callback
coarsen_fn_c=@cfunction($coarsen_fn_2d,Cint,(Ptr{p4est_t}, p4est_topidx_t, Ptr{Ptr{p4est_quadrant_t}}))
p4est_coarsen(ptr_pXest, Cint(0), coarsen_fn_c, C_NULL)
else
@assert false
coarsen_fn_c=@cfunction($coarsen_fn_3d,Cint,(Ptr{p8est_t}, p4est_topidx_t, Ptr{Ptr{p8est_quadrant_t}}))
p8est_coarsen(ptr_pXest, Cint(0), coarsen_fn_c, C_NULL)
end
end
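
With both branches implemented, the uniform refine/coarsen helpers now work in 2D and 3D; before this change the 3D paths stopped at @assert false. A hedged calling sketch, assuming ptr_pXest_2d and ptr_pXest_3d already point at valid forests of the matching dimension:

# 2D forest: dispatches to p4est_refine / p4est_coarsen
pXest_uniformly_refine!(Val{2}, ptr_pXest_2d)
pXest_uniformly_coarsen!(Val{2}, ptr_pXest_2d)

# 3D forest: now dispatches to p8est_refine / p8est_coarsen
pXest_uniformly_refine!(Val{3}, ptr_pXest_3d)
pXest_uniformly_coarsen!(Val{3}, ptr_pXest_3d)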

@@ -599,8 +600,7 @@ function _compute_fine_to_coarse_model_glue(

# Fill data for owned (from coarse cells owned by the processor)
fgids = get_cell_gids(fmodel)
f1,f2,f3, cgids_snd, cgids_rcv = map(fmodel.models,
partition(fgids)) do fmodel, fpartition
f1,f2,f3, cgids_snd, cgids_rcv = map(local_views(fmodel),partition(fgids)) do fmodel, fpartition
if (!(GridapDistributed.i_am_in(cparts)))
# cmodel might be distributed among less processes than fmodel
nothing, nothing, nothing, Int[], Int[]
@@ -613,7 +613,6 @@ function _compute_fine_to_coarse_model_glue(
fcell_to_child_id =
_process_owned_cells_fine_to_coarse_model_glue(cmodel_local,fmodel,cpartition,fpartition)


# Note: Reversing snd and rcv
lids_rcv,lids_snd = map(PArrays.getany,assembly_local_indices(partition(fgids)))
cgids_data = local_to_global(cpartition)[fine_to_coarse_faces_map[Dc+1][lids_snd.data]]
@@ -758,16 +757,16 @@ function _compute_fine_to_coarse_model_glue(
fcell_to_child_id::Union{Nothing,MPIArray})
fgids=get_cell_gids(fmodel)
map(fmodel.models,fine_to_coarse_faces_map,fcell_to_child_id) do fmodel, fine_to_coarse_faces_map, fcell_to_child_id
if (!(GridapDistributed.i_am_in(cparts)))
# cmodel might be distributed among less processes than fmodel
Int[], Int[], Int[], Int[]
else
if GridapDistributed.i_am_in(cparts)
cgids = get_cell_gids(cmodel)
cpartition = PArrays.getany(partition(cgids))
_setup_communication_buffers_fine_partition(fine_to_coarse_faces_map,
fcell_to_child_id,
partition(fgids),
cpartition)
else
# cmodel might be distributed among less processes than fmodel
Int[], Int[], Int[], Int[]
end
end |> tuple_of_arrays
end
@@ -1387,7 +1386,8 @@ function Gridap.Adaptivity.refine(model::OctreeDistributedDiscreteModel{Dc,Dp};
if GridapDistributed.i_am_in(new_comm)
if !isa(parts,Nothing)
aux = ptr_new_pXest
ptr_new_pXest = _p4est_to_new_comm(ptr_new_pXest,
ptr_new_pXest = _pXest_to_new_comm(Val{Dc},
ptr_new_pXest,
model.ptr_pXest_connectivity,
model.parts.comm,
parts.comm)
@@ -1736,21 +1736,63 @@ function Gridap.Adaptivity.coarsen(model::OctreeDistributedDiscreteModel{Dc,Dp})
end
end

function pXest_deflate_quadrants(::Type{Val{Dc}},ptr_pXest,data) where Dc
if Dc ==2
P4est_wrapper.p4est_deflate_quadrants(ptr_pXest,data)
else
P4est_wrapper.p8est_deflate_quadrants(ptr_pXest,data)
end
end

function pXest_comm_count_pertree(::Type{Val{Dc}},ptr_pXest,pertree) where Dc
if Dc == 2
p4est_comm_count_pertree(ptr_pXest,pertree)
else
p8est_comm_count_pertree(ptr_pXest,pertree)
end
end

function pXest_inflate(::Type{Val{Dc}},
comm,
ptr_pXest_conn,
global_first_quadrant,
pertree,
quadrants,
data,
user_pointer) where Dc
if Dc == 2
P4est_wrapper.p4est_inflate(comm,
ptr_pXest_conn,
global_first_quadrant,
pertree,
quadrants,
data,
user_pointer)
else
P4est_wrapper.p8est_inflate(comm,
ptr_pXest_conn,
global_first_quadrant,
pertree,
quadrants,
data,
user_pointer)
end
end

# We have a p4est distributed among P processors. This function
# instantiates the same among Q processors.
function _p4est_to_new_comm(ptr_pXest, ptr_pXest_conn, old_comm, new_comm)
A=is_included(old_comm,new_comm) # old \subset new (smaller to larger nparts)
B=is_included(new_comm,old_comm) # old \supset new (larger to smaller nparts)
function _pXest_to_new_comm(::Type{Val{Dc}},ptr_pXest, ptr_pXest_conn, old_comm, new_comm) where Dc
A = is_included(old_comm,new_comm) # old \subset new (smaller to larger nparts)
B = is_included(new_comm,old_comm) # old \supset new (larger to smaller nparts)
@assert xor(A,B)
if (A)
_p4est_to_new_comm_old_subset_new(ptr_pXest, ptr_pXest_conn, old_comm, new_comm)
_pXest_to_new_comm_old_subset_new(Val{Dc},ptr_pXest, ptr_pXest_conn, old_comm, new_comm)
else
_p4est_to_new_comm_old_supset_new(ptr_pXest, ptr_pXest_conn, old_comm, new_comm)
_pXest_to_new_comm_old_supset_new(Val{Dc},ptr_pXest, ptr_pXest_conn, old_comm, new_comm)
end
end
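
As the comment above states, this function re-instantiates a forest distributed over P ranks on a communicator with Q ranks, choosing the "old subset of new" or "old superset of new" path from the is_included checks. A rough usage sketch of the new dimension-generic signature, assuming an existing OctreeDistributedDiscreteModel bound to model and a target parts object, mirroring the call sites further down in this diff:

# Hedged sketch: migrate the underlying forest (here 3D, Dc = 3) to parts.comm.
Dc = 3
ptr_pXest_moved = _pXest_to_new_comm(Val{Dc},
                                     model.ptr_pXest,
                                     model.ptr_pXest_connectivity,
                                     model.parts.comm,   # old communicator (P ranks)
                                     parts.comm)         # new communicator (Q ranks)
# Ranks that do not belong to the new communicator receive nothing.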

function _p4est_to_new_comm_old_subset_new(ptr_pXest, ptr_pXest_conn, old_comm, new_comm)
function _pXest_to_new_comm_old_subset_new(::Type{Val{Dc}},ptr_pXest, ptr_pXest_conn, old_comm, new_comm) where Dc
if (GridapDistributed.i_am_in(new_comm))
new_comm_num_parts = GridapDistributed.num_parts(new_comm)
global_first_quadrant = Vector{P4est_wrapper.p4est_gloidx_t}(undef,new_comm_num_parts+1)
@@ -1771,38 +1813,38 @@ function _p4est_to_new_comm_old_subset_new(ptr_pXest, ptr_pXest_conn, old_comm,
global_first_quadrant[i] = old_global_first_quadrant[end]
end
MPI.Bcast!(global_first_quadrant,0,new_comm)
quadrants = P4est_wrapper.p4est_deflate_quadrants(ptr_pXest,C_NULL)
p4est_comm_count_pertree(ptr_pXest,pertree)
quadrants = pXest_deflate_quadrants(Val{Dc},ptr_pXest,C_NULL)
pXest_comm_count_pertree(Val{Dc},ptr_pXest,pertree)
MPI.Bcast!(pertree,0,new_comm)
else
MPI.Bcast!(global_first_quadrant,0,new_comm)
quadrants = sc_array_new_count(sizeof(p4est_quadrant_t), 0)
MPI.Bcast!(pertree,0,new_comm)
end
return P4est_wrapper.p4est_inflate(new_comm,
ptr_pXest_conn,
global_first_quadrant,
pertree,
quadrants,
C_NULL,
C_NULL)
return pXest_inflate(Val{Dc},
new_comm,
ptr_pXest_conn,
global_first_quadrant,
pertree,
quadrants,
C_NULL,
C_NULL)
else
return nothing
end
end

function _p4est_to_new_comm_old_supset_new(ptr_pXest, ptr_pXest_conn, old_comm, new_comm)
function _pXest_to_new_comm_old_supset_new(::Type{Val{Dc}},ptr_pXest, ptr_pXest_conn, old_comm, new_comm) where Dc
@assert GridapDistributed.i_am_in(old_comm)
pXest = ptr_pXest[]
pXest_conn = ptr_pXest_conn[]

pertree = Vector{P4est_wrapper.p4est_gloidx_t}(undef,pXest_conn.num_trees+1)
p4est_comm_count_pertree(ptr_pXest,pertree)
pXest_comm_count_pertree(Val{Dc},ptr_pXest,pertree)

if (GridapDistributed.i_am_in(new_comm))
new_comm_num_parts = GridapDistributed.num_parts(new_comm)
global_first_quadrant = Vector{P4est_wrapper.p4est_gloidx_t}(undef,new_comm_num_parts+1)
pXest=ptr_pXest[]
old_comm_num_parts = GridapDistributed.num_parts(old_comm)
old_global_first_quadrant = unsafe_wrap(Array,
pXest.global_first_quadrant,
@@ -1815,15 +1857,16 @@ function _p4est_to_new_comm_old_supset_new(ptr_pXest, ptr_pXest_conn, old_comm,
for i = 1:length(new_global_first_quadrant)
global_first_quadrant[i] = old_global_first_quadrant[i]
end
quadrants = P4est_wrapper.p4est_deflate_quadrants(ptr_pXest,C_NULL)

return P4est_wrapper.p4est_inflate(new_comm,
ptr_pXest_conn,
global_first_quadrant,
pertree,
quadrants,
C_NULL,
C_NULL)
quadrants = pXest_deflate_quadrants(Val{Dc},ptr_pXest,C_NULL)

return pXest_inflate(Val{Dc},
new_comm,
ptr_pXest_conn,
global_first_quadrant,
pertree,
quadrants,
C_NULL,
C_NULL)
else
return nothing
end
@@ -1998,7 +2041,8 @@ function _redistribute_parts_subseteq_parts_redistributed(model::OctreeDistribut
if (parts_redistributed_model === model.parts)
ptr_pXest_old = model.ptr_pXest
else
ptr_pXest_old = _p4est_to_new_comm(model.ptr_pXest,
ptr_pXest_old = _pXest_to_new_comm(Val{Dc},
model.ptr_pXest,
model.ptr_pXest_connectivity,
model.parts.comm,
parts.comm)
@@ -2080,7 +2124,8 @@ function _redistribute_parts_supset_parts_redistributed(

# ptr_pXest_old is distributed over supset_comm
# once created, ptr_pXest_new is distributed over subset_comm
ptr_pXest_new = _p4est_to_new_comm(ptr_pXest_old,
ptr_pXest_new = _pXest_to_new_comm(Val{Dc},
ptr_pXest_old,
model.ptr_pXest_connectivity,
supset_comm,
subset_comm)
24 changes: 12 additions & 12 deletions test/OctreeDistributedDiscreteModelsTests.jl
@@ -44,25 +44,25 @@ module OctreeDistributedDiscreteModelsTests
return level_parts
end

function run_with_env(distribute,subdomains,num_parts_x_level)
ranks=distribute(LinearIndices((prod(subdomains),)))
function run_with_env(distribute,np,nc,num_parts_x_level)
ranks = distribute(LinearIndices((prod(np),)))
GridapP4est.with(ranks) do
run(distribute,subdomains,num_parts_x_level)
run(distribute,np,nc,num_parts_x_level)
end
end

function run(distribute,subdomains,num_parts_x_level)
ranks=distribute(LinearIndices((prod(subdomains),)))
if length(subdomains) == 2
function run(distribute,np,nc,num_parts_x_level)
ranks = distribute(LinearIndices((prod(np),)))
if length(nc) == 2
domain=(0,1,0,1)
else
@assert length(subdomains) == 3
@assert length(nc) == 3
domain=(0,1,0,1,0,1)
end

# Generate model
level_parts = generate_level_parts(ranks,num_parts_x_level)
coarse_model = CartesianDiscreteModel(domain,subdomains)
coarse_model = CartesianDiscreteModel(domain,nc)
model = OctreeDistributedDiscreteModel(level_parts[2],coarse_model,1)
vmodel1 = GridapP4est.VoidOctreeDistributedDiscreteModel(model,ranks)
vmodel2 = GridapP4est.VoidOctreeDistributedDiscreteModel(coarse_model,ranks)
@@ -87,7 +87,7 @@ module OctreeDistributedDiscreteModelsTests
f_model_tasks_L2_back, dglueL1toL2 = redistribute(fmodel_tasks_L1,level_parts[2])

# Coarsening
model_back,glue = coarsen(f_model_tasks_L2_back)
model_back, glue = coarsen(f_model_tasks_L2_back)

if GridapDistributed.i_am_in(level_parts[2])
@test num_cells(model_back)==num_cells(model)
@@ -103,10 +103,10 @@ module OctreeDistributedDiscreteModelsTests
model = OctreeDistributedDiscreteModel(level_parts[1],coarse_model,3)
imodel = model
for i=1:3
omodel,glue=coarsen(imodel)
imodel=omodel
omodel, glue = coarsen(imodel)
imodel = omodel
end
@test num_cells(imodel)==prod(subdomains)
@test num_cells(imodel) == prod(nc)
nothing
end
end # module
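
With the updated signature the driver takes the number of MPI tasks (np) and the coarse Cartesian cell layout (nc) as separate arguments, so a single run routine covers 2D and 3D, while num_parts_x_level gives the number of tasks used at each level of the octree hierarchy. A hedged sketch of the intended invocations, using names from the drivers below (the actual drivers use one with_mpi block per case):

with_mpi() do distribute
  # 4 MPI tasks, 2x2 coarse Cartesian mesh (2D), levels on 4 and 2 tasks
  TestModule.run_with_env(distribute, 4, (2,2), [4,2])
  # 2x2x2 coarse Cartesian mesh (3D) with the same level layout
  TestModule.run_with_env(distribute, 4, (2,2,2), [4,2])
end
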
10 changes: 8 additions & 2 deletions test/mpi/OctreeDistributedDiscreteModelsNoEnvTests.jl
@@ -7,8 +7,14 @@ import .OctreeDistributedDiscreteModelsTests as TestModule

MPI.Init()

# 2D
with_mpi() do distribute
TestModule.run(distribute,(2,2),[4,2])
end
TestModule.run(distribute,4,(2,2),[4,2])
end

# 3D
with_mpi() do distribute
TestModule.run(distribute,4,(2,2,2),[4,2])
end

MPI.Finalize()
10 changes: 8 additions & 2 deletions test/mpi/OctreeDistributedDiscreteModelsTests.jl
@@ -5,8 +5,14 @@ using PartitionedArrays
include("../OctreeDistributedDiscreteModelsTests.jl")
import .OctreeDistributedDiscreteModelsTests as TestModule

# 2D
with_mpi() do distribute
TestModule.run_with_env(distribute,(2,2),[4,2])
end
TestModule.run_with_env(distribute,4,(2,2),[4,2])
end

# 3D
with_mpi() do distribute
TestModule.run_with_env(distribute,4,(2,2,2),[4,2])
end

MPI.Finalize()
