-
Hi, I would like advice from the MFEM community on how to impose a Dirichlet boundary condition at a given point. This kind of boundary condition is very helpful in implicit quasi-static nonlinear mechanics to handle rigid body motions. Maybe it will also shed light (for me) on how essential boundary dofs are treated in my code. To make things clear, I have already defined two classes handling Dirichlet boundary conditions:
The interfaces of my classes mostly contains essentially two methods:
It seems to work as expected in sequential and parallel computations. So far so good. My idea is to create a new class which would handle a Dirichlet boundary condition on the vertex closest to a given point. I need to do two things:
The first point is the most difficult. Please find below the code that I use, which seems to work correctly. Maybe it could help other users. Best, Thomas template <std::size_t space_dimension, bool parallel>
/*!
 * \brief return the degree of freedom associated with component `c` of
 * the mesh node closest to point `pt`.
 * \param[in] fes: finite element space (sequential or parallel)
 * \param[in] mesh: underlying mesh
 * \param[in] pt: target point
 * \param[in] c: component of the unknown to be constrained
 * \return in sequential computations, the dof index; in parallel
 * computations, the *local true* dof index on the process owning the
 * closest node, and an empty optional on every other process.
 */
static std::optional<size_type> getDegreeOfFreedomForClosestNode(
    FiniteElementSpace<parallel>& fes,
    const Mesh<parallel>& mesh,
    std::array<real, space_dimension> pt,
    const size_type c) {
  const auto dim = mesh.Dimension();
  if (dim != space_dimension) {
    raise(
        "getDegreeOfFreedomForClosestNode: "
        "unmatched space dimension");
  }
  GridFunction<parallel> nodes(&fes);
  mesh.GetNodes(nodes);
  const auto bynodes = fes.GetOrdering() == mfem::Ordering::byNODES;
  const auto size = nodes.Size() / dim;
  // map a (node index, component) pair to the dof index, honouring the
  // ordering convention of the finite element space
  auto index = [bynodes, size](const size_type i,
                               const size_type j) -> size_type {
    if (bynodes) {
      return i + j * size;
    }
    return i * space_dimension + j;
  };
  // traversal of all nodes to detect the closest vertex. Squared
  // distances are compared: the comparison is monotonic, which avoids
  // one sqrt call per node.
  auto min_distance2 = std::numeric_limits<real>::max();
  // local dof id associated with the closest vertex, if handled by the
  // current process
  auto local_dof_id = size_type{};
#ifdef MFEM_USE_MPI
  // global (true) dof id associated with the closest vertex.
  // -1 is a sentinel meaning "no candidate on this process": without it,
  // a process with no nodes would keep the zero-initialized value and
  // could spuriously match a winning global dof id equal to 0 below.
  auto global_dof_id = HYPRE_BigInt{-1};
#else /* MFEM_USE_MPI */
  auto global_dof_id = size_type{-1};
#endif /* MFEM_USE_MPI */
  if (size == 0) {
    if constexpr (!parallel) {
      // empty sequential mesh: no dof can be returned
      return std::optional<size_type>{};
    }
    // in parallel, an empty partition must still take part in the
    // collective communications below
  }
  for (size_type i = 0; i != size; ++i) {
    auto d2 = real{};
    for (size_type j = 0; j < space_dimension; ++j) {
      const auto pc = nodes[index(i, j)];
      d2 += (pc - pt[j]) * (pc - pt[j]);
    }
    if (d2 < min_distance2) {
      if constexpr (parallel) {
        // GetLocalTDofNumber returns -1 when the dof is not a true dof
        // owned by this process
        local_dof_id = fes.GetLocalTDofNumber(index(i, c));
        global_dof_id = fes.GetGlobalTDofNumber(index(i, c));
      } else {
        local_dof_id = index(i, c);
      }
      min_distance2 = d2;
    }
  }
  auto dof_id = std::optional<size_type>{};
#ifdef MFEM_USE_MPI
  if constexpr (parallel) {
    static_assert(std::is_same_v<size_type, int>, "invalid integer types");
    static_assert(std::is_same_v<HYPRE_BigInt, int>, "invalid integer types");
    int nbranks, myrank;
    MPI_Comm_size(MPI_COMM_WORLD, &nbranks);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    std::vector<double> d_min_buffer(nbranks, -1);
    std::vector<HYPRE_BigInt> global_dof_id_buffer(nbranks, -1);
    double d_min = min_distance2;
    // gathering the minimum (squared) distances and the matching global
    // dof ids over all processes
    MPI_Allgather(&d_min, 1, MPI_DOUBLE, d_min_buffer.data(), 1, MPI_DOUBLE,
                  MPI_COMM_WORLD);
    MPI_Allgather(&global_dof_id, 1, MPI_INT, global_dof_id_buffer.data(),
                  1, MPI_INT, MPI_COMM_WORLD);
    // locate the minimum among the minimum values
    const auto result =
        std::min_element(d_min_buffer.begin(), d_min_buffer.end());
    // locate on which process we have the minimum
    const auto target_pid = result - d_min_buffer.begin();
    if (global_dof_id_buffer[target_pid] == -1) {
      // no process found any candidate node (globally empty mesh)
      return dof_id;
    }
    // several processes may share the closest node; only the one owning
    // the associated true dof (local_dof_id != -1) reports it, so the
    // caller never receives -1 as a "valid" dof index
    if ((global_dof_id == global_dof_id_buffer[target_pid]) &&
        (local_dof_id >= 0)) {
      dof_id = local_dof_id;
    }
  } else {
    dof_id = local_dof_id;
  }
#else /* MFEM_USE_MPI */
  dof_id = local_dof_id;
#endif /* MFEM_USE_MPI */
  return dof_id;
}  // end of getDegreeOfFreedomForClosestNode
template <std::size_t space_dimension>
/*!
 * \brief dispatcher: select the sequential or the parallel
 * implementation depending on how the discretization was built.
 * \param[in] fed: finite element discretization
 * \param[in] pt: target point
 * \param[in] c: component of the unknown to be constrained
 */
static std::optional<size_type> getDegreeOfFreedomForClosestNode(
    FiniteElementDiscretization& fed,
    std::array<real, space_dimension> pt,
    const size_type c) {
  if (fed.describesAParallelComputation()) {
#ifdef MFEM_USE_MPI
    return getDegreeOfFreedomForClosestNode<space_dimension, true>(
        fed.getFiniteElementSpace<true>(), fed.getMesh<true>(), pt, c);
#else
    // parallel discretization requested, but MFEM was built without MPI
    raise(
        "getDegreeOfFreedomForClosestNode: "
        "unsupported parallel computations");
#endif
  }
  // sequential case
  return getDegreeOfFreedomForClosestNode<space_dimension, false>(
      fed.getFiniteElementSpace<false>(), fed.getMesh<false>(), pt, c);
}  // end of getDegreeOfFreedomForClosestNode Note: this is almost standard |
Beta Was this translation helpful? Give feedback.
Replies: 1 comment
-
The proposed code seems to be working, so this comment is made to close the discussion. |
Beta Was this translation helpful? Give feedback.
The proposed code seems to be working, so this comment is made to close the discussion.